code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
This module defines the Gate class, which represents a quantum gate, as well as implementations of many common Gates.
Through the use of KroneckerGate and ProductGate, Gates can be formed for complex circuit structures. The matrix and mat_jac functions are used for numerical optimization of parameterized gates. The assemble function is used to generate an intermediate language of tuples that can be used by Assemblers to output descriptions of quantum circuits in other formats.
"""
import numpy as np
from . import utils, unitaries
from hashlib import md5
try:
from qsrs import native_from_object
except ImportError:
native_from_object = None
class Gate():
    """Base class describing the interface every quantum gate in Qsearch implements."""
    def __init__(self):
        """Gates must set the following variables in __init__
        self.num_inputs : The number of parameters needed to generate a unitary. This can be 0.
        self.qudits : The number of qudits acted on by a unitary of the size generated by the gate. For example, this would be 1 for U3, 2 for CNOT.
        """
        raise NotImplementedError("Subclasses of Gate should declare their own initializers.")
    def matrix(self, v):
        """Build the unitary for the parameter vector v (v is empty for constant gates).

        Args:
            v : A numpy array of real floats in [0, 2*PI], of length self.num_inputs.
        Returns:
            np.ndarray : A "complex128" unitary of size d**self.qudits, where d is the
                intended qudit dimension (2 for qubits, 3 for qutrits, etc.)
        """
        raise NotImplementedError("Subclasses of Gate are required to implement the matrix(v) method.")
    def mat_jac(self, v):
        """Build the unitary together with its Jacobian matrices.

        Constant gates and gradient-free Solvers do not require an implementation.
        Each Jacobian is the elementwise partial derivative of the unitary with
        respect to one parameter; the ith entry corresponds to v[i]. See U3Gate
        for a worked example.

        Args:
            v : A numpy array of real floats in [0, 2*PI], of length self.num_inputs.
        Returns:
            tuple : (the same unitary matrix(v) would return, a list of Jacobians).
        """
        if self.num_inputs != 0:
            raise NotImplementedError("Subclasses of Gate are required to implement the mat_jac(v) method in order to be used with gradient optimizers.")
        # A constant gate (one with no parameters) has no jacobian.
        return (self.matrix(v), [])
    def assemble(self, v, i=0):
        """Convert this gate to the tuple-based intermediate representation.

        Args:
            v : The same parameter vector that might be passed to matrix(v).
            i : The index of the lowest-indexed qubit that the gate acts on.
        Returns:
            list : Tuples of the form ("gate", gatename, (*gateparameters), (*gateindices)),
                or ("block", tuples) where tuples is a list in this same format.
                See U3Gate, XZXZGate, CNOTGate, and NonadjacentCNOTGate for examples.
        """
        raise NotImplementedError("Subclasses of Gate are required to implement the assemble(v, i) method.")
    def __eq__(self, other):
        # Gates compare equal when their canonical reprs match.
        return repr(other) == repr(self)
    def __hash__(self):
        # Hash must agree with __eq__, so it is also derived from the repr.
        digest = md5(repr(self).encode()).hexdigest()
        return int(digest, 16)
    def copy(self):
        # Gates are treated as immutable, so copying returns self.
        return self
    def _parts(self):
        return [self]
    def __copy__(self):
        return self
    def __deepcopy__(self, memo):
        return self
    def __repr__(self):
        return "Gate()"
    def validate_structure(self):
        return True
class IdentityGate(Gate):
    """An identity gate over any number of qudits of any dimension; its matrix is constant."""
    def __init__(self, qudits=1, d=2):
        """
        Args:
            qudits : How many qudits this identity spans.
            d : The dimension of each qudit (2 for qubits, 3 for qutrits, etc.)
        """
        self.num_inputs = 0
        self.qudits = qudits
        self._d = d
        # Precompute the constant identity once.
        self._I = np.array(np.eye(d ** qudits), dtype='complex128')
    def matrix(self, v):
        return self._I
    def assemble(self, v, i=0):
        # The identity emits no instructions.
        return []
    def __repr__(self):
        if self._d == 2 and self.qudits == 1:
            return "IdentityGate()"
        return f"IdentityGate(qudits={self.qudits}, d={self._d})"
class XGate(Gate):
    """A single-qubit rotation about the X axis, parameterized by one angle."""
    def __init__(self):
        self.qudits = 1
        self.num_inputs = 1
    def matrix(self, v):
        return unitaries.rot_x(v[0])
    def mat_jac(self, v):
        theta = v[0]
        # One parameter -> one Jacobian matrix.
        return unitaries.rot_x(theta), [unitaries.rot_x_jac(theta)]
    def assemble(self, v, i=0):
        return [("gate", "X", (v[0],), (i,))]
    def __repr__(self):
        return "XGate()"
class YGate(Gate):
    """A single-qubit rotation about the Y axis, parameterized by one angle."""
    def __init__(self):
        self.qudits = 1
        self.num_inputs = 1
    def matrix(self, v):
        return unitaries.rot_y(v[0])
    def mat_jac(self, v):
        theta = v[0]
        # One parameter -> one Jacobian matrix.
        return unitaries.rot_y(theta), [unitaries.rot_y_jac(theta)]
    def assemble(self, v, i=0):
        return [("gate", "Y", (v[0],), (i,))]
    def __repr__(self):
        return "YGate()"
class ZGate(Gate):
    """A single-qubit rotation about the Z axis, parameterized by one angle."""
    def __init__(self):
        self.qudits = 1
        self.num_inputs = 1
    def matrix(self, v):
        return unitaries.rot_z(v[0])
    def mat_jac(self, v):
        theta = v[0]
        # One parameter -> one Jacobian matrix.
        return unitaries.rot_z(theta), [unitaries.rot_z_jac(theta)]
    def assemble(self, v, i=0):
        return [("gate", "Z", (v[0],), (i,))]
    def __repr__(self):
        return "ZGate()"
class ZXZXZGate(Gate):
    """Represents an arbitrary parameterized single-qubit gate, decomposed into 3 parameterized Z gates separated by X(PI/2) gates."""
    def __init__(self):
        # Three Z-rotation angles parameterize the gate.
        self.num_inputs = 3
        self.qudits = 1
        # Constant X(pi/2) factor, reused on every call.
        self._x90 = unitaries.rot_x(np.pi/2)
        # Scratch 2x2 matrices, rewritten in place by matrix()/mat_jac() to
        # avoid reallocating on every evaluation.
        self._rot_z = unitaries.rot_z(0)
        self._out = np.array(np.eye(2), dtype='complex128')
        self._buffer = np.array(np.eye(2), dtype = 'complex128')
    def matrix(self, v):
        """Return Z(v[2]) @ X(pi/2) @ Z(v[1]) @ X(pi/2) @ Z(v[0]), accumulated in the scratch buffers."""
        utils.re_rot_z(v[0], self._rot_z)
        self._out = np.dot(self._x90, self._rot_z, out=self._out)
        utils.re_rot_z(v[1], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._out, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z(v[2], self._rot_z)
        # The final product is freshly allocated so callers never alias the scratch buffers.
        return np.dot(self._rot_z, self._out)
    def mat_jac(self, v):
        """Return (U, [J1, J2, J3]).

        Each Jacobian is computed by a full rebuild of the product in which the
        corresponding Z factor is replaced by its derivative (re_rot_z_jac);
        all other factors use the ordinary rotation (re_rot_z).
        """
        # Pass 1: derivative with respect to v[0].
        utils.re_rot_z_jac(v[0], self._rot_z)
        self._out = np.dot(self._x90, self._rot_z, out=self._out)
        utils.re_rot_z(v[1], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._out, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z(v[2], self._rot_z)
        J1 = np.dot(self._rot_z, self._out)
        # Pass 2: derivative with respect to v[1].
        utils.re_rot_z(v[0], self._rot_z)
        self._out = np.dot(self._x90, self._rot_z, out=self._out)
        utils.re_rot_z_jac(v[1], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._out, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z(v[2], self._rot_z)
        J2 = np.dot(self._rot_z, self._out)
        # Pass 3: derivative with respect to v[2].
        utils.re_rot_z(v[0], self._rot_z)
        self._out = np.dot(self._x90, self._rot_z, out=self._out)
        utils.re_rot_z(v[1], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._out, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z_jac(v[2], self._rot_z)
        J3 = np.dot(self._rot_z, self._out)
        # self._out still holds X @ Z(v[1]) @ X @ Z(v[0]), so one more ordinary
        # Z(v[2]) factor yields the unitary itself.
        utils.re_rot_z(v[2], self._rot_z)
        U = np.dot(self._rot_z, self._out)
        return (U, [J1, J2, J3])
    def assemble(self, v, i=0):
        out = []
        v = np.array(v)%(2*np.pi) # confine the range of what we print to come up with nicer numbers at no loss of generality
        out.append(("gate", "Z", (v[0],), (i,)))
        out.append(("gate", "X", (np.pi/2,), (i,)))
        out.append(("gate", "Z", (v[1],), (i,)))
        out.append(("gate", "X", (np.pi/2,), (i,)))
        out.append(("gate", "Z", (v[2],), (i,)))
        return [("block", out)]
    def __repr__(self):
        return "ZXZXZGate()"
class XZXZGate(Gate):
    """Represents a partially parameterized single qubit gate, equivalent to ZXZXZ but without the first Z gate. This is useful because that first Z gate can commute through the control of a CNOT, thereby reducing the number of parameters we need to solve for."""
    def __init__(self):
        # Two Z-rotation angles parameterize the gate.
        self.num_inputs = 2
        self.qudits = 1
        # Constant X(pi/2) factor, reused on every call.
        self._x90 = unitaries.rot_x(np.pi/2)
        # Scratch 2x2 matrices, rewritten in place by matrix()/mat_jac().
        self._rot_z = unitaries.rot_z(0)
        self._out = np.array(np.eye(2), dtype='complex128')
        self._buffer = np.array(np.eye(2), dtype = 'complex128')
        # need two buffers due to a bug in some implementations of numpy
    def matrix(self, v):
        """Return Z(v[1]) @ X(pi/2) @ Z(v[0]) @ X(pi/2), accumulated in the scratch buffers."""
        utils.re_rot_z(v[0], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._x90, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z(v[1], self._rot_z)
        # The final product is freshly allocated so callers never alias the scratch buffers.
        return np.dot(self._rot_z, self._out)
    def mat_jac(self, v):
        """Return (U, [J1, J2]).

        Each Jacobian rebuilds the product with the corresponding Z factor
        replaced by its derivative (re_rot_z_jac).
        """
        # Pass 1: derivative with respect to v[0].
        utils.re_rot_z_jac(v[0], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._x90, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z(v[1], self._rot_z)
        J1 = np.dot(self._rot_z, self._out)
        # Pass 2: derivative with respect to v[1].
        utils.re_rot_z(v[0], self._rot_z)
        self._buffer = np.dot(self._rot_z, self._x90, out=self._buffer)
        self._out = np.dot(self._x90, self._buffer, out=self._out)
        utils.re_rot_z_jac(v[1], self._rot_z)
        J2 = np.dot(self._rot_z, self._out)
        # self._out still holds X @ Z(v[0]) @ X, so one ordinary Z(v[1]) factor
        # yields the unitary itself.
        utils.re_rot_z(v[1], self._rot_z)
        U = np.dot(self._rot_z, self._out)
        return (U, [J1, J2])
    def assemble(self, v, i=0):
        out = []
        v = np.array(v)%(2*np.pi) # confine the range of what we print to come up with nicer numbers at no loss of generality
        out.append(("gate", "X", (np.pi/2,), (i,)))
        out.append(("gate", "Z", (v[0],), (i,)))
        out.append(("gate", "X", (np.pi/2,), (i,)))
        out.append(("gate", "Z", (v[1],), (i,)))
        return [("block", out)]
    def __repr__(self):
        return "XZXZGate()"
class U3Gate(Gate):
    """An arbitrary single-qubit gate using IBM's U3 parameterization (theta, phi, lambda)."""
    def __init__(self):
        self.num_inputs = 3
        self.qudits = 1
    def matrix(self, v):
        # Half-angle terms for theta, full-angle terms for phi and lambda.
        ct, st = np.cos(v[0]/2), np.sin(v[0]/2)
        cp, sp = np.cos(v[1]), np.sin(v[1])
        cl, sl = np.cos(v[2]), np.sin(v[2])
        return np.array(
            [[ct, -st * (cl + 1j * sl)],
             [st * (cp + 1j * sp), ct * (cl * cp - sl * sp + 1j * cl * sp + 1j * sl * cp)]],
            dtype='complex128')
    def mat_jac(self, v):
        ct, st = np.cos(v[0]/2), np.sin(v[0]/2)
        cp, sp = np.cos(v[1]), np.sin(v[1])
        cl, sl = np.cos(v[2]), np.sin(v[2])
        U = np.array(
            [[ct, -st * (cl + 1j * sl)],
             [st * (cp + 1j * sp), ct * (cl * cp - sl * sp + 1j * cl * sp + 1j * sl * cp)]],
            dtype='complex128')
        # Partial derivative with respect to theta (note the 1/2 from the half angle).
        J1 = np.array(
            [[-0.5*st, -0.5*ct * (cl + 1j * sl)],
             [0.5*ct * (cp + 1j * sp), -0.5*st * (cl * cp - sl * sp + 1j * cl * sp + 1j * sl * cp)]],
            dtype='complex128')
        # Partial derivative with respect to phi.
        J2 = np.array(
            [[0, 0],
             [st *(-sp + 1j * cp), ct *(cl * -sp - sl * cp + 1j * cl * cp + 1j * sl * -sp)]],
            dtype='complex128')
        # Partial derivative with respect to lambda.
        J3 = np.array(
            [[0, -st *(-sl + 1j * cl)],
             [0, ct *(-sl * cp - cl * sp + 1j * -sl * sp + 1j * cl * cp)]],
            dtype='complex128')
        return (U, [J1, J2, J3])
    def assemble(self, v, i=0):
        # Confine the range to nice numbers.
        v = np.array(v) % (2 * np.pi)
        return [("gate", "U3", (v[0], v[1], v[2]), (i,))]
    def __eq__(self, other):
        return type(other) is type(self)
    def __repr__(self):
        return "U3Gate()"
class U2Gate(Gate):
    """A single-qubit gate using IBM's U2 parameterization (phi, lambda; theta fixed at pi/2)."""
    def __init__(self):
        self.num_inputs = 2
        self.qudits = 1
    def matrix(self, v):
        return 1/np.sqrt(2) * np.array(
            [[1, -np.exp(1j * v[1])],
             [np.exp(1j * v[0]), np.exp(1j * (v[0] + v[1]))]])
    def mat_jac(self, v):
        scale = 1/np.sqrt(2)
        e_lam = np.exp(1j * v[1])
        e_phi = np.exp(1j * v[0])
        e_both = np.exp(1j * (v[0] + v[1]))
        U = scale * np.array([[1, -e_lam], [e_phi, e_both]])
        # Partial derivative with respect to v[0] (phi).
        J1 = scale * np.array([[0, 0], [1j * e_phi, 1j * e_both]])
        # Partial derivative with respect to v[1] (lambda).
        J2 = scale * np.array([[0, -1j * e_lam], [0, 1j * e_both]])
        return (U, [J1, J2])
    def assemble(self, v, i=0):
        # Confine the range to nice numbers; emit as U3 with theta = pi/2.
        v = np.array(v) % (2 * np.pi)
        return [("gate", "U3", (np.pi/2, v[0], v[1]), (i,))]
    def __eq__(self, other):
        return type(other) is type(self)
    def __repr__(self):
        return "U2Gate()"
class U1Gate(Gate):
    """A single-qubit phase gate, parameterized in the same way as IBM's U1 gate."""
    def __init__(self):
        self.num_inputs = 1
        self.qudits = 1
    def matrix(self, v):
        # U1 is a Z rotation with a compensating global phase.
        return np.exp(1j*v[0]/2) * unitaries.rot_z(v[0])
    def mat_jac(self, v):
        phase = np.exp(1j*v[0]/2)
        U = phase * unitaries.rot_z(v[0])
        # Product rule over the phase factor and the rotation.
        J1 = 1j/2 * phase * unitaries.rot_z(v[0]) + phase * unitaries.rot_z_jac(v[0])
        return (U, [J1])
    def assemble(self, v, i=0):
        # Confine the range to nice numbers; emit as U3 with theta = phi = 0.
        v = np.array(v) % (2 * np.pi)
        return [("gate", "U3", (0, 0, v[0]), (i,))]
    def __eq__(self, other):
        return type(other) is type(self)
    def __repr__(self):
        return "U1Gate()"
class SingleQutritGate(Gate):
    """This gate represents an arbitrary parameterized single-qutrit gate.

    Parameterized by 8 real numbers: v[0:3] are rotation angles and v[3:8]
    are phases (a general SU(3) element up to global phase).
    """
    def matrix(self, v):
        # for reference see the original implementation, qt_arb_rot in utils.py, which is now deprecated
        # this was re-written to be computationally more efficient and more readable
        # s*/c* are sines/cosines of the three angles; p*/m* are e^{+i phase}/e^{-i phase}.
        s1 = np.sin(v[0])
        c1 = np.cos(v[0])
        s2 = np.sin(v[1])
        c2 = np.cos(v[1])
        s3 = np.sin(v[2])
        c3 = np.cos(v[2])
        p1 = np.exp(1j * v[3])
        m1 = np.exp(-1j * v[3])
        p2 = np.exp(1j * v[4])
        m2 = np.exp(-1j * v[4])
        p3 = np.exp(1j * v[5])
        m3 = np.exp(-1j * v[5])
        p4 = np.exp(1j * v[6])
        m4 = np.exp(-1j * v[6])
        p5 = np.exp(1j * v[7])
        m5 = np.exp(-1j * v[7])
        return np.array([
            [c1*c2*p1, s1*p3, c1*s2*p4],
            [s2*s3*m4*m5 - s1*c2*c3*p1*p2*m3, c1*c3*p2, -c2*s3*m1*m5 - s1*s2*c3*p2*m3*p4],
            [-s1*c2*s3*p1*m3*p5 - s2*c3*m2*m4, c1*s3*p5, c2*c3*m1*m2 - s1*s2*s3*m3*p4*p5]
            ], dtype = 'complex128')
    def mat_jac(self, v):
        """Return (U, [Jt1, Jt2, Jt3, Je1..Je5]): Jt* differentiate the angles v[0:3], Je* the phases v[3:8]."""
        s1 = np.sin(v[0])
        c1 = np.cos(v[0])
        s2 = np.sin(v[1])
        c2 = np.cos(v[1])
        s3 = np.sin(v[2])
        c3 = np.cos(v[2])
        p1 = np.exp(1j * v[3])
        m1 = np.exp(-1j * v[3])
        p2 = np.exp(1j * v[4])
        m2 = np.exp(-1j * v[4])
        p3 = np.exp(1j * v[5])
        m3 = np.exp(-1j * v[5])
        p4 = np.exp(1j * v[6])
        m4 = np.exp(-1j * v[6])
        p5 = np.exp(1j * v[7])
        m5 = np.exp(-1j * v[7])
        U = np.array([
            [c1*c2*p1, s1*p3, c1*s2*p4],
            [s2*s3*m4*m5 - s1*c2*c3*p1*p2*m3, c1*c3*p2, -c2*s3*m1*m5 - s1*s2*c3*p2*m3*p4],
            [-s1*c2*s3*p1*m3*p5 - s2*c3*m2*m4, c1*s3*p5, c2*c3*m1*m2 - s1*s2*s3*m3*p4*p5]
            ], dtype = 'complex128')
        # Partial derivative with respect to the angle v[0].
        Jt1 = np.array([
            [-s1*c2*p1, c1*p3, -s1*s2*p4],
            [-c1*c2*c3*p1*p2*m3, -s1*c3*p2, -c1*s2*c3*p2*m3*p4],
            [-c1*c2*s3*p1*m3*p5, -s1*s3*p5, -c1*s2*s3*m3*p4*p5]
            ], dtype = 'complex128')
        # Partial derivative with respect to the angle v[1].
        Jt2 = np.array([
            [-c1*s2*p1, 0, c1*c2*p4],
            [c2*s3*m4*m5 + s1*s2*c3*p1*p2*m3, 0, s2*s3*m1*m5 - s1*c2*c3*p2*m3*p4],
            [s1*s2*s3*p1*m3*p5 -c2*c3*m2*m4, 0, -s2*c3*m1*m2 - s1*c2*s3*m3*p4*p5]
            ], dtype = 'complex128')
        # Partial derivative with respect to the angle v[2].
        Jt3 = np.array([
            [0, 0, 0],
            [s2*c3*m4*m5 + s1*c2*s3*p1*p2*m3, -c1*s3*p2, -c2*c3*m1*m5 + s1*s2*s3*p2*m3*p4],
            [-s1*c2*c3*p1*m3*p5 + s2*s3*m2*m4, c1*c3*p5, -c2*s3*m1*m2 - s1*s2*c3*m3*p4*p5]
            ], dtype = 'complex128')
        # Partial derivatives with respect to the phases v[3] through v[7].
        Je1 = np.array([
            [1j*c1*c2*p1, 0, 0],
            [-1j*s1*c2*c3*p1*p2*m3, 0, 1j*c2*s3*m1*m5],
            [-1j*s1*c2*s3*p1*m3*p5, 0, -1j*c2*c3*m1*m2]
            ], dtype = 'complex128')
        Je2 = np.array([
            [0, 0, 0],
            [-1j*s1*c2*c3*p1*p2*m3, 1j*c1*c3*p2, -1j*s1*s2*c3*p2*m3*p4],
            [1j*s2*c3*m2*m4, 0, -1j*c2*c3*m1*m2]
            ], dtype = 'complex128')
        Je3 = np.array([
            [0, 1j*s1*p3, 0],
            [1j*s1*c2*c3*p1*p2*m3, 0, 1j*s1*s2*c3*p2*m3*p4],
            [1j*s1*c2*s3*p1*m3*p5, 0, 1j*s1*s2*s3*m3*p4*p5]
            ], dtype = 'complex128')
        Je4 = np.array([
            [0, 0, 1j*c1*s2*p4],
            [-1j*s2*s3*m4*m5, 0, -1j*s1*s2*c3*p2*m3*p4],
            [1j*s2*c3*m2*m4, 0, -1j*s1*s2*s3*m3*p4*p5]
            ], dtype = 'complex128')
        Je5 = np.array([
            [0, 0, 0],
            [-1j*s2*s3*m4*m5, 0, 1j*c2*s3*m1*m5],
            [-1j*s1*c2*s3*p1*m3*p5, 1j*c1*s3*p5, -1j*s1*s2*s3*m3*p4*p5]
            ], dtype = 'complex128')
        return (U, [Jt1, Jt2, Jt3, Je1, Je2, Je3, Je4, Je5])
    def assemble(self, v, i=0):
        # All 8 parameters are forwarded directly to the assembler.
        return [("gate", "QUTRIT", (*v,), (i,))]
    def __repr__(self):
        return "SingleQutritGate()"
class CSUMGate(Gate):
    """Represents the constant two-qutrit gate CSUM"""
    # Fixed 9x9 permutation matrix acting on the two-qutrit basis.
    _csum = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0],
                      [0, 0, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='complex128')
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def matrix(self, v):
        return CSUMGate._csum
    def assemble(self, v, i=0):
        return [("gate", "CSUM", (), (i, i + 1))]
    def __repr__(self):
        return "CSUMGate()"
class CPIGate(Gate):
    """Represents the constant two-qutrit gate CPI."""
    # Fixed 9x9 permutation matrix acting on the two-qutrit basis.
    _cpi = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 1, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 1, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='complex128')
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def matrix(self, v):
        return CPIGate._cpi
    def assemble(self, v, i=0):
        return [("gate", "CPI", (), (i, i + 1))]
    def __repr__(self):
        return "CPIGate()"
class CPIPhaseGate(Gate):
    """Represents the constant two-qutrit gate CPI with phase differences."""
    def __init__(self):
        # NOTE: the phase differences are drawn from np.random at construction
        # time, so two instances will generally hold different unitaries.
        self.num_inputs = 0
        self.qudits = 2
        base = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 1, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, -1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 1, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 1, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 1, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='complex128')
        # Apply a random phase to each of the last five basis columns.
        phases = [np.exp(2j * np.random.random() * np.pi) for _ in range(0, 5)]
        diag_mod = np.array(np.diag([1] * 4 + phases))
        self._cpi = np.matmul(base, diag_mod)
    def matrix(self, v):
        return self._cpi
    def assemble(self, v, i=0):
        return [("gate", "CPI-", (), (i, i + 1))]
    def __repr__(self):
        return "CPIPhaseGate()"
class CNOTGate(Gate):
    """Represents the constant two-qubit gate CNOT."""
    _cnot = np.array([[1, 0, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 0, 1],
                      [0, 0, 1, 0]], dtype='complex128')
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def __eq__(self, other):
        return type(other) is type(self)
    def matrix(self, v):
        return CNOTGate._cnot
    def assemble(self, v, i=0):
        return [("gate", "CNOT", (), (i, i + 1))]
    def __repr__(self):
        return "CNOTGate()"
class CZGate(Gate):
    """Represents the constant two-qubit gate Controlled-Z."""
    _gate = np.array([[1, 0, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, -1]], dtype='complex128')
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def __eq__(self, other):
        return type(other) is type(self)
    def matrix(self, v):
        return CZGate._gate
    def assemble(self, v, i=0):
        return [("gate", "CZ", (), (i, i + 1))]
    def __repr__(self):
        return "CZGate()"
class ISwapGate(Gate):
    """Represents the constant two-qubit gate ISwap."""
    _gate = np.array([[1, 0, 0, 0],
                      [0, 0, 1j, 0],
                      [0, 1j, 0, 0],
                      [0, 0, 0, 1]], dtype='complex128')
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def __eq__(self, other):
        return type(other) is type(self)
    def matrix(self, v):
        return ISwapGate._gate
    def assemble(self, v, i=0):
        return [("gate", "ISWAP", (), (i, i + 1))]
    def __repr__(self):
        return "ISwapGate()"
class XXGate(Gate):
    """Represents the constant two-qubit gate XX(pi/2)."""
    _gate = 1/np.sqrt(2) * np.array([[1, 0, 0, -1j],
                                     [0, 1, -1j, 0],
                                     [0, -1j, 1, 0],
                                     [-1j, 0, 0, 1]], dtype='complex128')
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def __eq__(self, other):
        return type(other) is type(self)
    def matrix(self, v):
        return XXGate._gate
    def assemble(self, v, i=0):
        return [("gate", "XX", (), (i, i + 1))]
    def __repr__(self):
        return "XXGate()"
class NonadjacentCNOTGate(Gate):
    """Represents the two-qubit gate CNOT, but between two qubits that are not necessarily next to each other."""
    def __init__(self, qudits, control, target):
        """
        Args:
            qudits : The total number of qubits that a unitary of the size returned by this gate would represent. For this gate, usually
                this is the total number of qubits in the larger circuit.
            control : The index of the control qubit, relative to the 0th qubit that would be affected by the unitary returned by this gate.
            target : The index of the target qubit, relative to the 0th qubit that would be affected by the unitary returned by this gate.
        """
        self.qudits = qudits
        self.num_inputs = 0
        self.control = control
        self.target = target
        # The full-width unitary is constant, so precompute it once.
        self._U = unitaries.arbitrary_cnot(qudits, control, target)
    def matrix(self, v):
        return self._U
    def assemble(self, v, i=0):
        # control/target are absolute indices for this gate, so i is ignored.
        return [("gate", "CNOT", (), (self.control, self.target))]
    def __repr__(self):
        return "NonadjacentCNOTGate({}, {}, {})".format(self.qudits, self.control, self.target)
    def validate_structure(self):
        """Return True if control/target are distinct valid qubit indices; warn and return False otherwise."""
        from warnings import warn  # local import: warn was previously referenced without ever being imported
        # Bugfix: the target check used '>' where the control check used '>=';
        # a target equal to qudits would index one past the last qubit.
        if self.control >= self.qudits or self.target >= self.qudits:
            warn("Invalid NonadjacentCNOTGate; both control and target must be smaller than dits. Expected {} > {} and {}".format(self.qudits, self.control, self.target))
            return False
        if self.control == self.target:
            warn("Invalid NonadjacentCNOTGate: control and target must be different. Expected {} != {}".format(self.control, self.target))
            return False
        return True
class UGate(Gate):
    """Represents an arbitrary constant gate, defined by the unitary passed to the initializer."""
    def __init__(self, U, d=2, gatename="CUSTOM", gateparams=(), gateindices=None):
        """
        Args:
            U : The unitary for the operation that this gate represents, as a numpy ndarray with datatype="complex128".
            d : The size of qudits for the operation that this gate represents. The default is 2, for qubits.
            gatename : A name for this gate, which will get passed to the Assembler at assembly time.
            gateparams : A tuple of parameters that will get passed to the Assembler at assembly time.
            gateindices : A tuple of indices for the qubits that this gate acts on, which will get passed to the Assembler at assembly time. This overrides the default behavior, which is to return a tuple of all the indices starting with the one passed in assemble(v, i), and ending at i+self.qudits
        """
        self.d = d
        self.U = U
        # Bugfix: the qudit count is log base d of the matrix dimension; the
        # old code always used log base 2, which was wrong for d != 2.
        self.qudits = int(np.log(U.shape[0]) / np.log(d))
        self.num_inputs = 0
        self.gatename = gatename
        self.gateparams = gateparams
        self.gateindices = gateindices
    def matrix(self, v):
        return self.U
    def assemble(self, v, i=0):
        gatename = self.gatename
        gateparams = self.gateparams
        # Default to the contiguous run of indices starting at i unless overridden.
        indices = self.gateindices if self.gateindices is not None else tuple((i+j for j in range(self.qudits)))
        return [("gate", gatename, gateparams, indices)]
    def __repr__(self):
        # Bugfix: repr previously referenced the bare name U (NameError); it
        # must use the stored attribute self.U.
        if self.d == 2:
            return "UGate({})".format(repr(self.U))
        else:
            return "UGate({}, d={})".format(repr(self.U), self.d)
class UpgradedConstantGate(Gate):
    """Represents a constant gate, based on the Gate passed to its initializer, but upgraded to act on qudits of a larger size."""
    def __init__(self, other, df=3):
        """
        Args:
            other : A Gate of a lower qudit size.
            df : The final, upgraded qudit size (default 3: upgrades qubit gates to qutrits).
        """
        if other.num_inputs > 0:
            raise AttributeError("UpgradedConstantGate is designed for only constant gates")
        base_unitary = other.matrix([])
        # Infer the original qudit dimension from the size of the unitary.
        di = int(base_unitary.shape[0] ** (1 / other.qudits))
        if df <= di:
            raise AttributeError("Gate cannot be upgraded because it is already of an equal or higher dit level")
        self.df = df
        self.qudits = other.qudits
        self.U = utils.upgrade_qudits(base_unitary, di, df)
        self.num_inputs = 0
        self.subgate = other
    def matrix(self, v):
        return self.U
    def assemble(self, v, i=0):
        # Assembly is delegated to the wrapped gate.
        return self.subgate.assemble(v, i)
    def __repr__(self):
        return "UpgradedConstantGate({}, df={})".format(repr(self.subgate), self.df)
class CUGate(Gate):
    """Represents an arbitrary controlled gate, defined by the unitary passed to the initializer."""
    def __init__(self, U, gatename="Name", gateparams=(), flipped=False):
        """
        Args:
            U : The unitary to form the controlled-unitary gate, in the form of a numpy ndarray with dtype="complex128"
            gatename : A name for this controlled gate which will get passed to the Assembler at assembly time.
            gateparams : A tuple of parameters that will get passed to the Assembler at assembly time.
            flipped : A boolean flag, which if set to true, will flip the direction of the gate. The default direction is for the control qubit to be the lower indexed qubit.
        """
        self.gatename = gatename
        self.gateparams = gateparams
        self.flipped = flipped
        self.num_inputs = 0
        self._U = U
        n = np.shape(U)[0]
        I = np.array(np.eye(n))
        # Assemble the 2n x 2n block-diagonal controlled unitary: identity in
        # one diagonal block, U in the other (which block depends on flipped).
        top = np.pad(self._U if flipped else I, [(0,n),(0,n)], 'constant')
        bot = np.pad(I if flipped else self._U, [(n,0),(n,0)], 'constant')
        self._CU = np.array(top + bot)
        self.qudits = 2
    def matrix(self, v):
        return self._CU
    def assemble(self, v, i=0):
        # Bugfix: the old code referenced the bare name `flipped` here, which
        # raised NameError at assembly time; use the stored attribute.
        indices = (i, i+1) if not self.flipped else (i+1, i)
        return [("gate", self.gatename, self.gateparams, indices)]
    def __repr__(self):
        # Bugfix: repr previously referenced the nonexistent attribute
        # self.name (always AttributeError); report the stored gatename and
        # separate the flipped flag with a comma.
        return "CUGate({}, gatename={}{})".format(repr(self._U), repr(self.gatename), ", flipped=True" if self.flipped else "")
class CNOTRootGate(Gate):
    """Represents the sqrt(CNOT) gate. Two sqrt(CNOT) gates in a row will form a CNOT gate."""
    _cnr = np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0.5+0.5j, 0.5-0.5j],
                     [0, 0, 0.5-0.5j, 0.5+0.5j]])
    def __init__(self):
        self.qudits = 2
        self.num_inputs = 0
    def matrix(self, v):
        return CNOTRootGate._cnr
    def assemble(self, v, i=0):
        return [("gate", "sqrt(CNOT)", (), (i, i + 1))]
    def __repr__(self):
        return "CNOTRootGate()"
class KroneckerGate(Gate):
    """Represents the Kronecker product of a list of gates. This is equivalent to performing those gate in parallel in a quantum circuit."""
    def __init__(self, *subgates):
        """
        Args:
            *subgates : A sequence of Gates. KroneckerGate will return the kronecker product of the unitaries returned by those Gates.
        """
        self.num_inputs = sum(gate.num_inputs for gate in subgates)
        self._subgates = subgates
        self.qudits = sum(gate.qudits for gate in subgates)
    def matrix(self, v):
        """Return the Kronecker product of the subgate unitaries, consuming v slice by slice."""
        if len(self._subgates) < 2:
            return self._subgates[0].matrix(v)
        matrices = []
        index = 0
        for gate in self._subgates:
            U = gate.matrix(v[index:index+gate.num_inputs])
            matrices.append(U)
            index += gate.num_inputs
        U = matrices[0]
        for matrix in matrices[1:]:
            U = np.kron(U, matrix)
        return U
    def mat_jac(self, v):
        """Return (U, jacs) where each Jacobian is lifted to the full product
        via d(A x B) = dA x B + A x dB (the non-differentiated factors are the
        unitaries themselves)."""
        if len(self._subgates) < 2:
            return self._subgates[0].mat_jac(v)
        matjacs = []
        index = 0
        for gate in self._subgates:
            MJ = gate.mat_jac(v[index:index+gate.num_inputs])
            matjacs.append(MJ)
            index += gate.num_inputs
        U = None
        jacs = []
        for M, Js in matjacs:
            # Extend the Jacobians collected so far by this gate's unitary,
            # then append this gate's Jacobians prefixed by the accumulated U.
            jacs = [np.kron(J, M) for J in jacs]
            for J in Js:
                jacs.append(J if U is None else np.kron(U, J))
            U = M if U is None else np.kron(U, M)
        return (U, jacs)
    def assemble(self, v, i=0):
        out = []
        index = 0
        for gate in self._subgates:
            out += gate.assemble(v[index:index+gate.num_inputs], i)
            index += gate.num_inputs
            i += gate.qudits
        return [("block", out)]
    def appending(self, gate):
        """Returns a new KroneckerGate with the new gate added to the list.
        Args:
            gate : A Gate to be added to the end of the list of gates in the new KroneckerGate.
        """
        return KroneckerGate(*self._subgates, gate)
    def _parts(self):
        return self._subgates
    def __deepcopy__(self, memo):
        from copy import deepcopy  # local import keeps this fix self-contained
        # Bugfix: tuples have no __deepcopy__ method, and the subgates must be
        # unpacked into the constructor rather than passed as one argument.
        return KroneckerGate(*deepcopy(self._subgates, memo))
    def __repr__(self):
        return "KroneckerGate({})".format(repr(self._subgates)[1:-1])
    def validate_structure(self):
        """Return True if the subgates' parameter and qudit counts add up; warn and return False otherwise."""
        from warnings import warn  # local import: warn was previously referenced without ever being imported
        valid = True
        num_inputs = 0
        dits = 0
        for subgate in self._subgates:
            if not subgate.validate_structure():
                valid = False
            num_inputs += subgate.num_inputs
            dits += subgate.qudits
        if num_inputs != self.num_inputs:
            warn("KroneckerGate had a num_inputs mismatch: expected {} but got {}".format(self.num_inputs, num_inputs))
            valid = False
        if dits != self.qudits:
            warn("KroneckerGate had a dits mismatch: expected {} but got {}".format(self.qudits, dits))
            valid = False
        return valid
class ProductGate(Gate):
    """Represents a matrix product of Gates. This is equivalent to performing those gates sequentially in a quantum circuit."""
    def __init__(self, *subgates):
        """
        Args:
            subgates : A list of Gates to be multiplied together. ProductGate returns the matrix product of the unitaries returned by those Gates.
        """
        self.num_inputs = sum(gate.num_inputs for gate in subgates)
        self._subgates = list(subgates)
        self.qudits = 0 if len(subgates) == 0 else subgates[0].qudits
    def matrix(self, v):
        """Return submats[-1] @ ... @ submats[0] (the first subgate is applied first)."""
        if len(self._subgates) < 2:
            return self._subgates[0].matrix(v)
        U = None
        index = 0
        for gate in self._subgates:
            M = gate.matrix(v[index:index+gate.num_inputs])
            index += gate.num_inputs
            # Bugfix: the old double-buffered np.matmul(..., out=...) scheme
            # mis-swapped its buffers, so after the first iteration the same
            # array was both an input and the output of matmul, which is not
            # supported and produced incorrect results for 3+ subgates.
            U = M if U is None else np.matmul(M, U)
        return U
    def mat_jac(self, v):
        """Return (U, jacs); the Jacobian for a parameter of gate i is
        (product of later gates) @ J_i @ (product of earlier gates)."""
        if len(self._subgates) < 2:
            return self._subgates[0].mat_jac(v)
        submats = []
        subjacs = []
        index = 0
        for gate in self._subgates:
            U, Js = gate.mat_jac(v[index:index+gate.num_inputs])
            submats.append(U)
            subjacs.append(Js)
            index += gate.num_inputs
        # Bugfix: the previous implementation reused np.matmul buffers as both
        # input and output (its swap logic assigned a buffer back to itself),
        # corrupting the results. Rebuilt with explicit prefix/suffix products.
        n = len(submats)
        dim = submats[0].shape[0]
        # befores[i] = submats[i-1] @ ... @ submats[0] (identity for i == 0).
        befores = [np.eye(dim, dtype='complex128')]
        for M in submats[:-1]:
            befores.append(np.matmul(M, befores[-1]))
        # afters[i] = submats[n-1] @ ... @ submats[i+1] (identity for i == n-1).
        afters = [None] * n
        afters[n-1] = np.eye(dim, dtype='complex128')
        for i in range(n - 2, -1, -1):
            afters[i] = np.matmul(afters[i+1], submats[i+1])
        jacs = []
        for i in range(n):
            for J in subjacs[i]:
                jacs.append(np.matmul(afters[i], np.matmul(J, befores[i])))
        U = np.matmul(submats[n-1], befores[n-1])
        return (U, jacs)
    def assemble(self, v, i=0):
        out = []
        index = 0
        for gate in self._subgates:
            out += gate.assemble(v[index:index+gate.num_inputs], i)
            index += gate.num_inputs
        return out
    def appending(self, *gates):
        """Returns a new ProductGate with the new gates appended to the end.
        Args:
            gates : A list of Gates to be appended.
        """
        return ProductGate(*self._subgates, *gates)
    def inserting(self, *gates, depth=-1):
        """Returns a new ProductGate with new `gates` inserted at some index `depth`.
        Args:
            gates : A list of Gates to be inserted.
            depth : An index in the subgates of the ProductGate after which the new gates will be inserted. The default value of -1 will insert these gates at the begining of the ProductGate.
        """
        return ProductGate(*self._subgates[:depth], *gates, *self._subgates[depth:])
    def __deepcopy__(self, memo):
        from copy import deepcopy  # local import keeps this fix self-contained
        # Bugfix: lists have no __deepcopy__ method, and the subgates must be
        # unpacked into the constructor rather than passed as one argument.
        return ProductGate(*deepcopy(self._subgates, memo))
    def __repr__(self):
        return "ProductGate({})".format(repr(self._subgates)[1:-1])
    def validate_structure(self):
        """Return True if all subgates match this gate's size and the parameter counts add up; warn and return False otherwise."""
        from warnings import warn  # local import: warn was previously referenced without ever being imported
        valid = True
        num_inputs = 0
        for subgate in self._subgates:
            if subgate.qudits != self.qudits:
                warn("ProductGate had a size mismatch: expected {} but got {}".format(self.qudits, subgate.qudits))
                valid = False
            if not subgate.validate_structure():
                valid = False
            num_inputs += subgate.num_inputs
        if num_inputs != self.num_inputs:
            warn("ProductGate had a num_inputs mismatch: expected {} but got {}".format(self.num_inputs, num_inputs))
            valid = False
        # Bugfix: the function previously ended with `valid = True` and
        # implicitly returned None; it must return the accumulated flag.
        return valid
| [
"numpy.pad",
"numpy.eye",
"numpy.log",
"numpy.shape",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.cos",
"numpy.matmul",
"numpy.kron",
"numpy.dot",
"numpy.random.random",
"numpy.sqrt"
] | [((19155, 19459), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, \n 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='complex128')\n", (19163, 19459), True, 'import numpy as np\n'), ((19968, 20272), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='complex128')\n", (19976, 20272), True, 'import numpy as np\n'), ((21824, 21915), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], dtype=\n 'complex128')\n", (21832, 21915), True, 'import numpy as np\n'), ((22402, 22494), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]], dtype=\n 'complex128')\n", (22410, 22494), True, 'import numpy as np\n'), ((22971, 23067), 'numpy.array', 'np.array', (['[[1, 0, 
0, 0], [0, 0, 1.0j, 0], [0, 1.0j, 0, 0], [0, 0, 0, 1]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, 0], [0, 0, 1.0j, 0], [0, 1.0j, 0, 0], [0, 0, 0, 1]],\n dtype='complex128')\n", (22979, 23067), True, 'import numpy as np\n'), ((30428, 30534), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0.5 + 0.5j, 0.5 - 0.5j], [0, 0, 0.5 - \n 0.5j, 0.5 + 0.5j]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0.5 + 0.5j, 0.5 - 0.5j], [0, 0,\n 0.5 - 0.5j, 0.5 + 0.5j]])\n', (30436, 30534), True, 'import numpy as np\n'), ((7446, 7491), 'numpy.dot', 'np.dot', (['self._x90', 'self._rot_z'], {'out': 'self._out'}), '(self._x90, self._rot_z, out=self._out)\n', (7452, 7491), True, 'import numpy as np\n'), ((7557, 7605), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {'out': 'self._buffer'}), '(self._rot_z, self._out, out=self._buffer)\n', (7563, 7605), True, 'import numpy as np\n'), ((7626, 7672), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (7632, 7672), True, 'import numpy as np\n'), ((7730, 7760), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (7736, 7760), True, 'import numpy as np\n'), ((7854, 7899), 'numpy.dot', 'np.dot', (['self._x90', 'self._rot_z'], {'out': 'self._out'}), '(self._x90, self._rot_z, out=self._out)\n', (7860, 7899), True, 'import numpy as np\n'), ((7965, 8013), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {'out': 'self._buffer'}), '(self._rot_z, self._out, out=self._buffer)\n', (7971, 8013), True, 'import numpy as np\n'), ((8034, 8080), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (8040, 8080), True, 'import numpy as np\n'), ((8136, 8166), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (8142, 8166), True, 'import numpy as np\n'), ((8230, 8275), 'numpy.dot', 'np.dot', (['self._x90', 
'self._rot_z'], {'out': 'self._out'}), '(self._x90, self._rot_z, out=self._out)\n', (8236, 8275), True, 'import numpy as np\n'), ((8345, 8393), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {'out': 'self._buffer'}), '(self._rot_z, self._out, out=self._buffer)\n', (8351, 8393), True, 'import numpy as np\n'), ((8414, 8460), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (8420, 8460), True, 'import numpy as np\n'), ((8516, 8546), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (8522, 8546), True, 'import numpy as np\n'), ((8610, 8655), 'numpy.dot', 'np.dot', (['self._x90', 'self._rot_z'], {'out': 'self._out'}), '(self._x90, self._rot_z, out=self._out)\n', (8616, 8655), True, 'import numpy as np\n'), ((8721, 8769), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {'out': 'self._buffer'}), '(self._rot_z, self._out, out=self._buffer)\n', (8727, 8769), True, 'import numpy as np\n'), ((8790, 8836), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (8796, 8836), True, 'import numpy as np\n'), ((8896, 8926), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (8902, 8926), True, 'import numpy as np\n'), ((8990, 9020), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (8996, 9020), True, 'import numpy as np\n'), ((10316, 10364), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._x90'], {'out': 'self._buffer'}), '(self._rot_z, self._x90, out=self._buffer)\n', (10322, 10364), True, 'import numpy as np\n'), ((10385, 10431), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (10391, 10431), True, 'import numpy as np\n'), ((10489, 10519), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (10495, 
10519), True, 'import numpy as np\n'), ((10616, 10664), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._x90'], {'out': 'self._buffer'}), '(self._rot_z, self._x90, out=self._buffer)\n', (10622, 10664), True, 'import numpy as np\n'), ((10685, 10731), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (10691, 10731), True, 'import numpy as np\n'), ((10787, 10817), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (10793, 10817), True, 'import numpy as np\n'), ((10884, 10932), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._x90'], {'out': 'self._buffer'}), '(self._rot_z, self._x90, out=self._buffer)\n', (10890, 10932), True, 'import numpy as np\n'), ((10953, 10999), 'numpy.dot', 'np.dot', (['self._x90', 'self._buffer'], {'out': 'self._out'}), '(self._x90, self._buffer, out=self._out)\n', (10959, 10999), True, 'import numpy as np\n'), ((11059, 11089), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (11065, 11089), True, 'import numpy as np\n'), ((11145, 11175), 'numpy.dot', 'np.dot', (['self._rot_z', 'self._out'], {}), '(self._rot_z, self._out)\n', (11151, 11175), True, 'import numpy as np\n'), ((11920, 11936), 'numpy.cos', 'np.cos', (['(v[0] / 2)'], {}), '(v[0] / 2)\n', (11926, 11936), True, 'import numpy as np\n'), ((11948, 11964), 'numpy.sin', 'np.sin', (['(v[0] / 2)'], {}), '(v[0] / 2)\n', (11954, 11964), True, 'import numpy as np\n'), ((11976, 11988), 'numpy.cos', 'np.cos', (['v[1]'], {}), '(v[1])\n', (11982, 11988), True, 'import numpy as np\n'), ((12002, 12014), 'numpy.sin', 'np.sin', (['v[1]'], {}), '(v[1])\n', (12008, 12014), True, 'import numpy as np\n'), ((12028, 12040), 'numpy.cos', 'np.cos', (['v[2]'], {}), '(v[2])\n', (12034, 12040), True, 'import numpy as np\n'), ((12054, 12066), 'numpy.sin', 'np.sin', (['v[2]'], {}), '(v[2])\n', (12060, 12066), True, 'import numpy as np\n'), ((12082, 12231), 'numpy.array', 
'np.array', (['[[ct, -st * (cl + 1.0j * sl)], [st * (cp + 1.0j * sp), ct * (cl * cp - sl *\n sp + 1.0j * cl * sp + 1.0j * sl * cp)]]'], {'dtype': '"""complex128"""'}), "([[ct, -st * (cl + 1.0j * sl)], [st * (cp + 1.0j * sp), ct * (cl *\n cp - sl * sp + 1.0j * cl * sp + 1.0j * sl * cp)]], dtype='complex128')\n", (12090, 12231), True, 'import numpy as np\n'), ((12261, 12277), 'numpy.cos', 'np.cos', (['(v[0] / 2)'], {}), '(v[0] / 2)\n', (12267, 12277), True, 'import numpy as np\n'), ((12289, 12305), 'numpy.sin', 'np.sin', (['(v[0] / 2)'], {}), '(v[0] / 2)\n', (12295, 12305), True, 'import numpy as np\n'), ((12317, 12329), 'numpy.cos', 'np.cos', (['v[1]'], {}), '(v[1])\n', (12323, 12329), True, 'import numpy as np\n'), ((12343, 12355), 'numpy.sin', 'np.sin', (['v[1]'], {}), '(v[1])\n', (12349, 12355), True, 'import numpy as np\n'), ((12369, 12381), 'numpy.cos', 'np.cos', (['v[2]'], {}), '(v[2])\n', (12375, 12381), True, 'import numpy as np\n'), ((12395, 12407), 'numpy.sin', 'np.sin', (['v[2]'], {}), '(v[2])\n', (12401, 12407), True, 'import numpy as np\n'), ((12421, 12570), 'numpy.array', 'np.array', (['[[ct, -st * (cl + 1.0j * sl)], [st * (cp + 1.0j * sp), ct * (cl * cp - sl *\n sp + 1.0j * cl * sp + 1.0j * sl * cp)]]'], {'dtype': '"""complex128"""'}), "([[ct, -st * (cl + 1.0j * sl)], [st * (cp + 1.0j * sp), ct * (cl *\n cp - sl * sp + 1.0j * cl * sp + 1.0j * sl * cp)]], dtype='complex128')\n", (12429, 12570), True, 'import numpy as np\n'), ((12572, 12752), 'numpy.array', 'np.array', (['[[-0.5 * st, -0.5 * ct * (cl + 1.0j * sl)], [0.5 * ct * (cp + 1.0j * sp), -\n 0.5 * st * (cl * cp - sl * sp + 1.0j * cl * sp + 1.0j * sl * cp)]]'], {'dtype': '"""complex128"""'}), "([[-0.5 * st, -0.5 * ct * (cl + 1.0j * sl)], [0.5 * ct * (cp + 1.0j *\n sp), -0.5 * st * (cl * cp - sl * sp + 1.0j * cl * sp + 1.0j * sl * cp)]\n ], dtype='complex128')\n", (12580, 12752), True, 'import numpy as np\n'), ((12741, 12871), 'numpy.array', 'np.array', (['[[0, 0], [st * (-sp + 1.0j * cp), ct * (cl 
* -sp - sl * cp + 1.0j * cl * cp +\n 1.0j * sl * -sp)]]'], {'dtype': '"""complex128"""'}), "([[0, 0], [st * (-sp + 1.0j * cp), ct * (cl * -sp - sl * cp + 1.0j *\n cl * cp + 1.0j * sl * -sp)]], dtype='complex128')\n", (12749, 12871), True, 'import numpy as np\n'), ((12873, 13004), 'numpy.array', 'np.array', (['[[0, -st * (-sl + 1.0j * cl)], [0, ct * (-sl * cp - cl * sp + 1.0j * -sl *\n sp + 1.0j * cl * cp)]]'], {'dtype': '"""complex128"""'}), "([[0, -st * (-sl + 1.0j * cl)], [0, ct * (-sl * cp - cl * sp + 1.0j *\n -sl * sp + 1.0j * cl * cp)]], dtype='complex128')\n", (12881, 13004), True, 'import numpy as np\n'), ((13722, 13741), 'numpy.exp', 'np.exp', (['(1.0j * v[1])'], {}), '(1.0j * v[1])\n', (13728, 13741), True, 'import numpy as np\n'), ((13753, 13772), 'numpy.exp', 'np.exp', (['(1.0j * v[0])'], {}), '(1.0j * v[0])\n', (13759, 13772), True, 'import numpy as np\n'), ((13784, 13812), 'numpy.exp', 'np.exp', (['(1.0j * (v[0] + v[1]))'], {}), '(1.0j * (v[0] + v[1]))\n', (13790, 13812), True, 'import numpy as np\n'), ((15496, 15508), 'numpy.sin', 'np.sin', (['v[0]'], {}), '(v[0])\n', (15502, 15508), True, 'import numpy as np\n'), ((15522, 15534), 'numpy.cos', 'np.cos', (['v[0]'], {}), '(v[0])\n', (15528, 15534), True, 'import numpy as np\n'), ((15548, 15560), 'numpy.sin', 'np.sin', (['v[1]'], {}), '(v[1])\n', (15554, 15560), True, 'import numpy as np\n'), ((15574, 15586), 'numpy.cos', 'np.cos', (['v[1]'], {}), '(v[1])\n', (15580, 15586), True, 'import numpy as np\n'), ((15600, 15612), 'numpy.sin', 'np.sin', (['v[2]'], {}), '(v[2])\n', (15606, 15612), True, 'import numpy as np\n'), ((15626, 15638), 'numpy.cos', 'np.cos', (['v[2]'], {}), '(v[2])\n', (15632, 15638), True, 'import numpy as np\n'), ((15661, 15680), 'numpy.exp', 'np.exp', (['(1.0j * v[3])'], {}), '(1.0j * v[3])\n', (15667, 15680), True, 'import numpy as np\n'), ((15692, 15712), 'numpy.exp', 'np.exp', (['(-1.0j * v[3])'], {}), '(-1.0j * v[3])\n', (15698, 15712), True, 'import numpy as np\n'), ((15724, 
15743), 'numpy.exp', 'np.exp', (['(1.0j * v[4])'], {}), '(1.0j * v[4])\n', (15730, 15743), True, 'import numpy as np\n'), ((15755, 15775), 'numpy.exp', 'np.exp', (['(-1.0j * v[4])'], {}), '(-1.0j * v[4])\n', (15761, 15775), True, 'import numpy as np\n'), ((15787, 15806), 'numpy.exp', 'np.exp', (['(1.0j * v[5])'], {}), '(1.0j * v[5])\n', (15793, 15806), True, 'import numpy as np\n'), ((15818, 15838), 'numpy.exp', 'np.exp', (['(-1.0j * v[5])'], {}), '(-1.0j * v[5])\n', (15824, 15838), True, 'import numpy as np\n'), ((15850, 15869), 'numpy.exp', 'np.exp', (['(1.0j * v[6])'], {}), '(1.0j * v[6])\n', (15856, 15869), True, 'import numpy as np\n'), ((15881, 15901), 'numpy.exp', 'np.exp', (['(-1.0j * v[6])'], {}), '(-1.0j * v[6])\n', (15887, 15901), True, 'import numpy as np\n'), ((15913, 15932), 'numpy.exp', 'np.exp', (['(1.0j * v[7])'], {}), '(1.0j * v[7])\n', (15919, 15932), True, 'import numpy as np\n'), ((15944, 15964), 'numpy.exp', 'np.exp', (['(-1.0j * v[7])'], {}), '(-1.0j * v[7])\n', (15950, 15964), True, 'import numpy as np\n'), ((15979, 16295), 'numpy.array', 'np.array', (['[[c1 * c2 * p1, s1 * p3, c1 * s2 * p4], [s2 * s3 * m4 * m5 - s1 * c2 * c3 *\n p1 * p2 * m3, c1 * c3 * p2, -c2 * s3 * m1 * m5 - s1 * s2 * c3 * p2 * m3 *\n p4], [-s1 * c2 * s3 * p1 * m3 * p5 - s2 * c3 * m2 * m4, c1 * s3 * p5, \n c2 * c3 * m1 * m2 - s1 * s2 * s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[c1 * c2 * p1, s1 * p3, c1 * s2 * p4], [s2 * s3 * m4 * m5 - s1 *\n c2 * c3 * p1 * p2 * m3, c1 * c3 * p2, -c2 * s3 * m1 * m5 - s1 * s2 * c3 *\n p2 * m3 * p4], [-s1 * c2 * s3 * p1 * m3 * p5 - s2 * c3 * m2 * m4, c1 *\n s3 * p5, c2 * c3 * m1 * m2 - s1 * s2 * s3 * m3 * p4 * p5]], dtype=\n 'complex128')\n", (15987, 16295), True, 'import numpy as np\n'), ((16289, 16301), 'numpy.sin', 'np.sin', (['v[0]'], {}), '(v[0])\n', (16295, 16301), True, 'import numpy as np\n'), ((16315, 16327), 'numpy.cos', 'np.cos', (['v[0]'], {}), '(v[0])\n', (16321, 16327), True, 'import numpy as np\n'), ((16341, 
16353), 'numpy.sin', 'np.sin', (['v[1]'], {}), '(v[1])\n', (16347, 16353), True, 'import numpy as np\n'), ((16367, 16379), 'numpy.cos', 'np.cos', (['v[1]'], {}), '(v[1])\n', (16373, 16379), True, 'import numpy as np\n'), ((16393, 16405), 'numpy.sin', 'np.sin', (['v[2]'], {}), '(v[2])\n', (16399, 16405), True, 'import numpy as np\n'), ((16419, 16431), 'numpy.cos', 'np.cos', (['v[2]'], {}), '(v[2])\n', (16425, 16431), True, 'import numpy as np\n'), ((16454, 16473), 'numpy.exp', 'np.exp', (['(1.0j * v[3])'], {}), '(1.0j * v[3])\n', (16460, 16473), True, 'import numpy as np\n'), ((16485, 16505), 'numpy.exp', 'np.exp', (['(-1.0j * v[3])'], {}), '(-1.0j * v[3])\n', (16491, 16505), True, 'import numpy as np\n'), ((16517, 16536), 'numpy.exp', 'np.exp', (['(1.0j * v[4])'], {}), '(1.0j * v[4])\n', (16523, 16536), True, 'import numpy as np\n'), ((16548, 16568), 'numpy.exp', 'np.exp', (['(-1.0j * v[4])'], {}), '(-1.0j * v[4])\n', (16554, 16568), True, 'import numpy as np\n'), ((16580, 16599), 'numpy.exp', 'np.exp', (['(1.0j * v[5])'], {}), '(1.0j * v[5])\n', (16586, 16599), True, 'import numpy as np\n'), ((16611, 16631), 'numpy.exp', 'np.exp', (['(-1.0j * v[5])'], {}), '(-1.0j * v[5])\n', (16617, 16631), True, 'import numpy as np\n'), ((16643, 16662), 'numpy.exp', 'np.exp', (['(1.0j * v[6])'], {}), '(1.0j * v[6])\n', (16649, 16662), True, 'import numpy as np\n'), ((16674, 16694), 'numpy.exp', 'np.exp', (['(-1.0j * v[6])'], {}), '(-1.0j * v[6])\n', (16680, 16694), True, 'import numpy as np\n'), ((16706, 16725), 'numpy.exp', 'np.exp', (['(1.0j * v[7])'], {}), '(1.0j * v[7])\n', (16712, 16725), True, 'import numpy as np\n'), ((16737, 16757), 'numpy.exp', 'np.exp', (['(-1.0j * v[7])'], {}), '(-1.0j * v[7])\n', (16743, 16757), True, 'import numpy as np\n'), ((16769, 17085), 'numpy.array', 'np.array', (['[[c1 * c2 * p1, s1 * p3, c1 * s2 * p4], [s2 * s3 * m4 * m5 - s1 * c2 * c3 *\n p1 * p2 * m3, c1 * c3 * p2, -c2 * s3 * m1 * m5 - s1 * s2 * c3 * p2 * m3 *\n p4], [-s1 * c2 * s3 * p1 * 
m3 * p5 - s2 * c3 * m2 * m4, c1 * s3 * p5, \n c2 * c3 * m1 * m2 - s1 * s2 * s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[c1 * c2 * p1, s1 * p3, c1 * s2 * p4], [s2 * s3 * m4 * m5 - s1 *\n c2 * c3 * p1 * p2 * m3, c1 * c3 * p2, -c2 * s3 * m1 * m5 - s1 * s2 * c3 *\n p2 * m3 * p4], [-s1 * c2 * s3 * p1 * m3 * p5 - s2 * c3 * m2 * m4, c1 *\n s3 * p5, c2 * c3 * m1 * m2 - s1 * s2 * s3 * m3 * p4 * p5]], dtype=\n 'complex128')\n", (16777, 17085), True, 'import numpy as np\n'), ((17054, 17287), 'numpy.array', 'np.array', (['[[-s1 * c2 * p1, c1 * p3, -s1 * s2 * p4], [-c1 * c2 * c3 * p1 * p2 * m3, -\n s1 * c3 * p2, -c1 * s2 * c3 * p2 * m3 * p4], [-c1 * c2 * s3 * p1 * m3 *\n p5, -s1 * s3 * p5, -c1 * s2 * s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[-s1 * c2 * p1, c1 * p3, -s1 * s2 * p4], [-c1 * c2 * c3 * p1 * p2 *\n m3, -s1 * c3 * p2, -c1 * s2 * c3 * p2 * m3 * p4], [-c1 * c2 * s3 * p1 *\n m3 * p5, -s1 * s3 * p5, -c1 * s2 * s3 * m3 * p4 * p5]], dtype='complex128')\n", (17062, 17287), True, 'import numpy as np\n'), ((17289, 17572), 'numpy.array', 'np.array', (['[[-c1 * s2 * p1, 0, c1 * c2 * p4], [c2 * s3 * m4 * m5 + s1 * s2 * c3 * p1 *\n p2 * m3, 0, s2 * s3 * m1 * m5 - s1 * c2 * c3 * p2 * m3 * p4], [s1 * s2 *\n s3 * p1 * m3 * p5 - c2 * c3 * m2 * m4, 0, -s2 * c3 * m1 * m2 - s1 * c2 *\n s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[-c1 * s2 * p1, 0, c1 * c2 * p4], [c2 * s3 * m4 * m5 + s1 * s2 *\n c3 * p1 * p2 * m3, 0, s2 * s3 * m1 * m5 - s1 * c2 * c3 * p2 * m3 * p4],\n [s1 * s2 * s3 * p1 * m3 * p5 - c2 * c3 * m2 * m4, 0, -s2 * c3 * m1 * m2 -\n s1 * c2 * s3 * m3 * p4 * p5]], dtype='complex128')\n", (17297, 17572), True, 'import numpy as np\n'), ((17555, 17840), 'numpy.array', 'np.array', (['[[0, 0, 0], [s2 * c3 * m4 * m5 + s1 * c2 * s3 * p1 * p2 * m3, -c1 * s3 * p2,\n -c2 * c3 * m1 * m5 + s1 * s2 * s3 * p2 * m3 * p4], [-s1 * c2 * c3 * p1 *\n m3 * p5 + s2 * s3 * m2 * m4, c1 * c3 * p5, -c2 * s3 * m1 * m2 - s1 * s2 *\n c3 * m3 * p4 * p5]]'], 
{'dtype': '"""complex128"""'}), "([[0, 0, 0], [s2 * c3 * m4 * m5 + s1 * c2 * s3 * p1 * p2 * m3, -c1 *\n s3 * p2, -c2 * c3 * m1 * m5 + s1 * s2 * s3 * p2 * m3 * p4], [-s1 * c2 *\n c3 * p1 * m3 * p5 + s2 * s3 * m2 * m4, c1 * c3 * p5, -c2 * s3 * m1 * m2 -\n s1 * s2 * c3 * m3 * p4 * p5]], dtype='complex128')\n", (17563, 17840), True, 'import numpy as np\n'), ((17824, 18029), 'numpy.array', 'np.array', (['[[1.0j * c1 * c2 * p1, 0, 0], [-1.0j * s1 * c2 * c3 * p1 * p2 * m3, 0, 1.0j *\n c2 * s3 * m1 * m5], [-1.0j * s1 * c2 * s3 * p1 * m3 * p5, 0, -1.0j * c2 *\n c3 * m1 * m2]]'], {'dtype': '"""complex128"""'}), "([[1.0j * c1 * c2 * p1, 0, 0], [-1.0j * s1 * c2 * c3 * p1 * p2 * m3,\n 0, 1.0j * c2 * s3 * m1 * m5], [-1.0j * s1 * c2 * s3 * p1 * m3 * p5, 0, \n -1.0j * c2 * c3 * m1 * m2]], dtype='complex128')\n", (17832, 18029), True, 'import numpy as np\n'), ((18032, 18236), 'numpy.array', 'np.array', (['[[0, 0, 0], [-1.0j * s1 * c2 * c3 * p1 * p2 * m3, 1.0j * c1 * c3 * p2, -\n 1.0j * s1 * s2 * c3 * p2 * m3 * p4], [1.0j * s2 * c3 * m2 * m4, 0, -\n 1.0j * c2 * c3 * m1 * m2]]'], {'dtype': '"""complex128"""'}), "([[0, 0, 0], [-1.0j * s1 * c2 * c3 * p1 * p2 * m3, 1.0j * c1 * c3 *\n p2, -1.0j * s1 * s2 * c3 * p2 * m3 * p4], [1.0j * s2 * c3 * m2 * m4, 0,\n -1.0j * c2 * c3 * m1 * m2]], dtype='complex128')\n", (18040, 18236), True, 'import numpy as np\n'), ((18240, 18457), 'numpy.array', 'np.array', (['[[0, 1.0j * s1 * p3, 0], [1.0j * s1 * c2 * c3 * p1 * p2 * m3, 0, 1.0j * s1 *\n s2 * c3 * p2 * m3 * p4], [1.0j * s1 * c2 * s3 * p1 * m3 * p5, 0, 1.0j *\n s1 * s2 * s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[0, 1.0j * s1 * p3, 0], [1.0j * s1 * c2 * c3 * p1 * p2 * m3, 0, \n 1.0j * s1 * s2 * c3 * p2 * m3 * p4], [1.0j * s1 * c2 * s3 * p1 * m3 *\n p5, 0, 1.0j * s1 * s2 * s3 * m3 * p4 * p5]], dtype='complex128')\n", (18248, 18457), True, 'import numpy as np\n'), ((18454, 18658), 'numpy.array', 'np.array', (['[[0, 0, 1.0j * c1 * s2 * p4], [-1.0j * s2 * s3 * m4 * m5, 0, -1.0j * s1 
*\n s2 * c3 * p2 * m3 * p4], [1.0j * s2 * c3 * m2 * m4, 0, -1.0j * s1 * s2 *\n s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[0, 0, 1.0j * c1 * s2 * p4], [-1.0j * s2 * s3 * m4 * m5, 0, -1.0j *\n s1 * s2 * c3 * p2 * m3 * p4], [1.0j * s2 * c3 * m2 * m4, 0, -1.0j * s1 *\n s2 * s3 * m3 * p4 * p5]], dtype='complex128')\n", (18462, 18658), True, 'import numpy as np\n'), ((18662, 18866), 'numpy.array', 'np.array', (['[[0, 0, 0], [-1.0j * s2 * s3 * m4 * m5, 0, 1.0j * c2 * s3 * m1 * m5], [-\n 1.0j * s1 * c2 * s3 * p1 * m3 * p5, 1.0j * c1 * s3 * p5, -1.0j * s1 *\n s2 * s3 * m3 * p4 * p5]]'], {'dtype': '"""complex128"""'}), "([[0, 0, 0], [-1.0j * s2 * s3 * m4 * m5, 0, 1.0j * c2 * s3 * m1 *\n m5], [-1.0j * s1 * c2 * s3 * p1 * m3 * p5, 1.0j * c1 * s3 * p5, -1.0j *\n s1 * s2 * s3 * m3 * p4 * p5]], dtype='complex128')\n", (18670, 18866), True, 'import numpy as np\n'), ((20847, 21153), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, -1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, -1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='complex128')\n", (20855, 21153), True, 'import numpy as np\n'), ((21490, 21520), 'numpy.matmul', 'np.matmul', (['self._cpi', 'diag_mod'], {}), '(self._cpi, diag_mod)\n', (21499, 21520), True, 'import numpy as np\n'), ((23565, 23672), 'numpy.array', 'np.array', (['[[1, 0, 0, -1.0j], [0, 1, -1.0j, 0], [0, -1.0j, 1, 0], [-1.0j, 0, 0, 1]]'], {'dtype': '"""complex128"""'}), "([[1, 0, 0, -1.0j], [0, 1, -1.0j, 0], [0, -1.0j, 1, 0], [-1.0j, 0, \n 0, 1]], dtype='complex128')\n", (23573, 
23672), True, 'import numpy as np\n'), ((29612, 29675), 'numpy.pad', 'np.pad', (['(self._U if flipped else I)', '[(0, n), (0, n)]', '"""constant"""'], {}), "(self._U if flipped else I, [(0, n), (0, n)], 'constant')\n", (29618, 29675), True, 'import numpy as np\n'), ((29687, 29750), 'numpy.pad', 'np.pad', (['(I if flipped else self._U)', '[(n, 0), (n, 0)]', '"""constant"""'], {}), "(I if flipped else self._U, [(n, 0), (n, 0)], 'constant')\n", (29693, 29750), True, 'import numpy as np\n'), ((29767, 29786), 'numpy.array', 'np.array', (['(top + bot)'], {}), '(top + bot)\n', (29775, 29786), True, 'import numpy as np\n'), ((35440, 35487), 'numpy.eye', 'np.eye', (['submats[0].shape[0]'], {'dtype': '"""complex128"""'}), "(submats[0].shape[0], dtype='complex128')\n", (35446, 35487), True, 'import numpy as np\n'), ((5095, 5114), 'numpy.eye', 'np.eye', (['(d ** qudits)'], {}), '(d ** qudits)\n', (5101, 5114), True, 'import numpy as np\n'), ((7255, 7264), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7261, 7264), True, 'import numpy as np\n'), ((7318, 7327), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7324, 7327), True, 'import numpy as np\n'), ((9116, 9127), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (9124, 9127), True, 'import numpy as np\n'), ((10048, 10057), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (10054, 10057), True, 'import numpy as np\n'), ((10111, 10120), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (10117, 10120), True, 'import numpy as np\n'), ((11267, 11278), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (11275, 11278), True, 'import numpy as np\n'), ((13071, 13082), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (13079, 13082), True, 'import numpy as np\n'), ((13698, 13708), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13705, 13708), True, 'import numpy as np\n'), ((13833, 13863), 'numpy.array', 'np.array', (['[[1, -e1], [e2, e3]]'], {}), '([[1, -e1], [e2, e3]])\n', (13841, 13863), True, 'import numpy as np\n'), ((13887, 13929), 
'numpy.array', 'np.array', (['[[0, 0], [1.0j * e2, 1.0j * e3]]'], {}), '([[0, 0], [1.0j * e2, 1.0j * e3]])\n', (13895, 13929), True, 'import numpy as np\n'), ((13949, 13992), 'numpy.array', 'np.array', (['[[0, -1.0j * e1], [0, 1.0j * e3]]'], {}), '([[0, -1.0j * e1], [0, 1.0j * e3]])\n', (13957, 13992), True, 'import numpy as np\n'), ((14063, 14074), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (14071, 14074), True, 'import numpy as np\n'), ((14547, 14570), 'numpy.exp', 'np.exp', (['(1.0j * v[0] / 2)'], {}), '(1.0j * v[0] / 2)\n', (14553, 14570), True, 'import numpy as np\n'), ((14628, 14651), 'numpy.exp', 'np.exp', (['(1.0j * v[0] / 2)'], {}), '(1.0j * v[0] / 2)\n', (14634, 14651), True, 'import numpy as np\n'), ((14850, 14861), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (14858, 14861), True, 'import numpy as np\n'), ((23552, 23562), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (23559, 23562), True, 'import numpy as np\n'), ((29551, 29562), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (29559, 29562), True, 'import numpy as np\n'), ((29587, 29596), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (29593, 29596), True, 'import numpy as np\n'), ((31794, 31812), 'numpy.kron', 'np.kron', (['U', 'matrix'], {}), '(U, matrix)\n', (31801, 31812), True, 'import numpy as np\n'), ((34906, 34939), 'numpy.matmul', 'np.matmul', (['matrix', 'U'], {'out': 'buffer1'}), '(matrix, U, out=buffer1)\n', (34915, 34939), True, 'import numpy as np\n'), ((35680, 35709), 'numpy.matmul', 'np.matmul', (['matrix', 'A'], {'out': 'ba1'}), '(matrix, A, out=ba1)\n', (35689, 35709), True, 'import numpy as np\n'), ((36080, 36113), 'numpy.matmul', 'np.matmul', (['submats[i]', 'B'], {'out': 'bb1'}), '(submats[i], B, out=bb1)\n', (36089, 36113), True, 'import numpy as np\n'), ((13553, 13563), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13560, 13563), True, 'import numpy as np\n'), ((14734, 14757), 'numpy.exp', 'np.exp', (['(1.0j * v[0] / 2)'], {}), '(1.0j * v[0] / 2)\n', (14740, 
14757), True, 'import numpy as np\n'), ((26846, 26864), 'numpy.log', 'np.log', (['U.shape[0]'], {}), '(U.shape[0])\n', (26852, 26864), True, 'import numpy as np\n'), ((26865, 26874), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (26871, 26874), True, 'import numpy as np\n'), ((32232, 32245), 'numpy.kron', 'np.kron', (['J', 'M'], {}), '(J, M)\n', (32239, 32245), True, 'import numpy as np\n'), ((32384, 32397), 'numpy.kron', 'np.kron', (['U', 'M'], {}), '(U, M)\n', (32391, 32397), True, 'import numpy as np\n'), ((35986, 36009), 'numpy.matmul', 'np.matmul', (['J', 'B'], {'out': 'bj'}), '(J, B, out=bj)\n', (35995, 36009), True, 'import numpy as np\n'), ((14690, 14713), 'numpy.exp', 'np.exp', (['(1.0j * v[0] / 2)'], {}), '(1.0j * v[0] / 2)\n', (14696, 14713), True, 'import numpy as np\n'), ((36038, 36062), 'numpy.matmul', 'np.matmul', (['A', 'tmp'], {'out': 'J'}), '(A, tmp, out=J)\n', (36047, 36062), True, 'import numpy as np\n'), ((13602, 13621), 'numpy.exp', 'np.exp', (['(1.0j * v[0])'], {}), '(1.0j * v[0])\n', (13608, 13621), True, 'import numpy as np\n'), ((13621, 13649), 'numpy.exp', 'np.exp', (['(1.0j * (v[0] + v[1]))'], {}), '(1.0j * (v[0] + v[1]))\n', (13627, 13649), True, 'import numpy as np\n'), ((32334, 32347), 'numpy.kron', 'np.kron', (['U', 'J'], {}), '(U, J)\n', (32341, 32347), True, 'import numpy as np\n'), ((13581, 13600), 'numpy.exp', 'np.exp', (['(1.0j * v[1])'], {}), '(1.0j * v[1])\n', (13587, 13600), True, 'import numpy as np\n'), ((21421, 21439), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (21437, 21439), True, 'import numpy as np\n')] |
import nltk
from sklearn.preprocessing import normalize
import numpy as np
from gensim.models import KeyedVectors
from mabed.es_connector import Es_connector
model_path = "./word2vec_twitter_model.bin"
model = KeyedVectors.load_word2vec_format(model_path, binary=True,unicode_errors='ignore')
# the index to be read and vectors added
index = "twitterfdl2015mentions"
# get text from tweets
print("Getting tweets")
# my_connector = Es_connector(index=index, doc_type="tweet")
# query = {
# "match_all": {}
# }
# text = my_connector.bigTweetTextSearch({"query":query})
text = "coeur coeur sur toi bricou"
# for each tweet sum all the vectors and get the unitary vector
# maybe remove links ?
# convert a tweet into a vector using the trained model
print(text)
tokens = nltk.word_tokenize(text)
tweet_vec = np.zeros(model['coeur'].shape)
for word in tokens:
print(word)
try:
vector = model[word]
tweet_vec = np.add(tweet_vec, vector)
print(len(vector))
except KeyError as ke:
print(word,"is not in vocabulary")
tweet = normalize(tweet_vec[:,np.newaxis], axis=0)
print(tweet)
| [
"numpy.zeros",
"sklearn.preprocessing.normalize",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.add",
"nltk.word_tokenize"
] | [((211, 299), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['model_path'], {'binary': '(True)', 'unicode_errors': '"""ignore"""'}), "(model_path, binary=True, unicode_errors=\n 'ignore')\n", (244, 299), False, 'from gensim.models import KeyedVectors\n'), ((773, 797), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (791, 797), False, 'import nltk\n'), ((810, 840), 'numpy.zeros', 'np.zeros', (["model['coeur'].shape"], {}), "(model['coeur'].shape)\n", (818, 840), True, 'import numpy as np\n'), ((1067, 1110), 'sklearn.preprocessing.normalize', 'normalize', (['tweet_vec[:, np.newaxis]'], {'axis': '(0)'}), '(tweet_vec[:, np.newaxis], axis=0)\n', (1076, 1110), False, 'from sklearn.preprocessing import normalize\n'), ((935, 960), 'numpy.add', 'np.add', (['tweet_vec', 'vector'], {}), '(tweet_vec, vector)\n', (941, 960), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
import numpy as np
from numpy import pi, sqrt, exp, sin, cos, tan, log, log10
import scipy as sp
import scipy.interpolate
import h5py
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from aux import *
import batch_params as bp
##
## Read
##
from dataclasses import dataclass
@dataclass
class Snapshot:
pass
def load(dotM_p_ref, a_ini, suffix=""):
ss = Snapshot()
fname = '%s/%.0e_%g_%g_%g_%.2f%s.h5' % ('xray', dotM_p_ref, 0.01, 1.5, 1e7, a_ini, suffix)
with h5py.File(fname, 'r') as f:
ss.t = f['t'][:]
ss.a = f['a'][:]
ss.t = np.append(ss.t, [ss.t[-1]])
ss.a = np.append(ss.a, [a_ref])
return ss
p02 = load(5e11, 0.2)
p02_lo = load(2e11, 0.2)
p02_hi = load(1e12, 0.2)
p02_min = load(2e11, 0.2, "_min")
p02_max = load(1e12, 0.2, "_max")
p03 = load(5e11, 0.3)
p03_lo = load(2e11, 0.3)
p03_hi = load(1e12, 0.3)
p03_min = load(2e11, 0.3, "_min")
p03_max = load(1e12, 0.3, "_max")
##
## Plot
##
## rc settings (see http://matplotlib.sourceforge.net/users/customizing.html#customizing-matplotlib)
mpl.rc('font', family='serif')
mpl.rc('font', size='6.0')
mpl.rc('text', usetex=True)
mpl.rc('lines', linewidth=0.75)
mpl.rc('axes', linewidth=0.5)
mpl.rc('legend', frameon=False)
mpl.rc('legend', handlelength=2.5)
figwidth = 8.0 / 2.54 ## convert cm to in
figheight = 13.0 / 2.54 ## convert cm to in
mpl.rc('figure', figsize=[figwidth, figheight])
fig, ax = plt.subplots(nrows=2, ncols=1)
## [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']
ax_ = ax[0]
## Vertical red bar and the line
rect = mpl.patches.Rectangle((2.1e9, 0), 2.8e9, 1)
ax_.add_collection(PatchCollection([rect], facecolor='k', edgecolor='None', alpha=0.25))
ax_.axvline(3.5e9, ls='-', c='k', alpha=0.5)
ax_.axhline(a_ref/const.AU, ls='-', c='gray')
color = u'#1f77b4'
ax_.fill(np.concatenate((p02_lo.t, p02_hi.t[::-1]))/const.yr,
np.concatenate((p02_lo.a, p02_hi.a[::-1]))/const.AU,
facecolor=color, edgecolor=None, alpha=0.25)
ax_.semilogx(p02.t/const.yr, p02.a/const.AU, c=color)
color = u'#ff7f0e'
ax_.fill(np.concatenate((p03_lo.t, p03_hi.t[::-1]))/const.yr,
np.concatenate((p03_lo.a, p03_hi.a[::-1]))/const.AU,
facecolor=color, edgecolor=None, alpha=0.25)
ax_.semilogx(p03.t/const.yr, p03.a/const.AU, c=color)
legend = [ mpl.lines.Line2D([], [], color='w', label=r"$\alpha = 0.01$"), \
mpl.lines.Line2D([], [], color='w', label=r"$\beta = 1.5$") ]
ax_.legend(handles=legend, loc=(0.62, 0.85), handlelength=0)
ax_.set_title(r"Varying $\dot{M}_\mathrm{ref}$ by factor $2$")
ax_.set_xlabel(r"$t$ [yr]")
ax_.set_xlim(1e7, 5e9)
ax_.set_ylabel(r"$a$ [AU]")
ax_.set_ylim(0, 0.34)
ax_ = ax[1]
## Vertical red bar and the line
rect = mpl.patches.Rectangle((2.1e9, 0), 2.8e9, 1)
ax_.add_collection(PatchCollection([rect], facecolor='k', edgecolor='None', alpha=0.25))
ax_.axvline(3.5e9, ls='-', c='k', alpha=0.5)
ax_.axhline(a_ref/const.AU, ls='-', c='gray')
color = u'#1f77b4'
ax_.fill(np.concatenate((p02_min.t, p02_max.t[::-1]))/const.yr,
np.concatenate((p02_min.a, p02_max.a[::-1]))/const.AU,
facecolor=color, edgecolor=None, alpha=0.25)
ax_.semilogx(p02.t/const.yr, p02.a/const.AU, c=color)
color = u'#ff7f0e'
ax_.fill(np.concatenate((p03_min.t, p03_max.t[::-1]))/const.yr,
np.concatenate((p03_min.a, p03_max.a[::-1]))/const.AU,
facecolor=color, edgecolor=None, alpha=0.25)
ax_.semilogx(p03.t/const.yr, p03.a/const.AU, c=color)
legend = [ mpl.lines.Line2D([], [], color='w', label=r"$\alpha = 0.01$"), \
mpl.lines.Line2D([], [], color='w', label=r"$\beta = 1.5$") ]
ax_.legend(handles=legend, loc=(0.62, 0.85), handlelength=0)
ax_.set_title(r"Varying $L_\mathrm{X,ref}$ by factor $2$")
ax_.set_xlabel(r"$t$ [yr]")
ax_.set_xlim(1e7, 5e9)
ax_.set_ylabel(r"$a$ [AU]")
ax_.set_ylim(0, 0.34)
plt.tight_layout()
plt.savefig('a_var.pdf')
| [
"matplotlib.rc",
"h5py.File",
"numpy.concatenate",
"matplotlib.lines.Line2D",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"numpy.append",
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((1214, 1244), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (1220, 1244), True, 'import matplotlib as mpl\n'), ((1245, 1271), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'size': '"""6.0"""'}), "('font', size='6.0')\n", (1251, 1271), True, 'import matplotlib as mpl\n'), ((1272, 1299), 'matplotlib.rc', 'mpl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1278, 1299), True, 'import matplotlib as mpl\n'), ((1300, 1331), 'matplotlib.rc', 'mpl.rc', (['"""lines"""'], {'linewidth': '(0.75)'}), "('lines', linewidth=0.75)\n", (1306, 1331), True, 'import matplotlib as mpl\n'), ((1332, 1361), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {'linewidth': '(0.5)'}), "('axes', linewidth=0.5)\n", (1338, 1361), True, 'import matplotlib as mpl\n'), ((1362, 1393), 'matplotlib.rc', 'mpl.rc', (['"""legend"""'], {'frameon': '(False)'}), "('legend', frameon=False)\n", (1368, 1393), True, 'import matplotlib as mpl\n'), ((1394, 1428), 'matplotlib.rc', 'mpl.rc', (['"""legend"""'], {'handlelength': '(2.5)'}), "('legend', handlelength=2.5)\n", (1400, 1428), True, 'import matplotlib as mpl\n'), ((1535, 1582), 'matplotlib.rc', 'mpl.rc', (['"""figure"""'], {'figsize': '[figwidth, figheight]'}), "('figure', figsize=[figwidth, figheight])\n", (1541, 1582), True, 'import matplotlib as mpl\n'), ((1594, 1624), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)'}), '(nrows=2, ncols=1)\n', (1606, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1862), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['(2100000000.0, 0)', '(2800000000.0)', '(1)'], {}), '((2100000000.0, 0), 2800000000.0, 1)\n', (1826, 1862), True, 'import matplotlib as mpl\n'), ((2964, 3021), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['(2100000000.0, 0)', '(2800000000.0)', '(1)'], {}), '((2100000000.0, 0), 2800000000.0, 1)\n', (2985, 3021), True, 'import matplotlib as mpl\n'), ((4074, 
4092), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4090, 4092), True, 'import matplotlib.pyplot as plt\n'), ((4093, 4117), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""a_var.pdf"""'], {}), "('a_var.pdf')\n", (4104, 4117), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1936), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['[rect]'], {'facecolor': '"""k"""', 'edgecolor': '"""None"""', 'alpha': '(0.25)'}), "([rect], facecolor='k', edgecolor='None', alpha=0.25)\n", (1883, 1936), False, 'from matplotlib.collections import PatchCollection\n'), ((2545, 2606), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[]', '[]'], {'color': '"""w"""', 'label': '"""$\\\\alpha = 0.01$"""'}), "([], [], color='w', label='$\\\\alpha = 0.01$')\n", (2561, 2606), True, 'import matplotlib as mpl\n'), ((2621, 2680), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[]', '[]'], {'color': '"""w"""', 'label': '"""$\\\\beta = 1.5$"""'}), "([], [], color='w', label='$\\\\beta = 1.5$')\n", (2637, 2680), True, 'import matplotlib as mpl\n'), ((3027, 3095), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['[rect]'], {'facecolor': '"""k"""', 'edgecolor': '"""None"""', 'alpha': '(0.25)'}), "([rect], facecolor='k', edgecolor='None', alpha=0.25)\n", (3042, 3095), False, 'from matplotlib.collections import PatchCollection\n'), ((3712, 3773), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[]', '[]'], {'color': '"""w"""', 'label': '"""$\\\\alpha = 0.01$"""'}), "([], [], color='w', label='$\\\\alpha = 0.01$')\n", (3728, 3773), True, 'import matplotlib as mpl\n'), ((3788, 3847), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[]', '[]'], {'color': '"""w"""', 'label': '"""$\\\\beta = 1.5$"""'}), "([], [], color='w', label='$\\\\beta = 1.5$')\n", (3804, 3847), True, 'import matplotlib as mpl\n'), ((623, 644), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (632, 644), False, 'import h5py\n'), ((717, 744), 
'numpy.append', 'np.append', (['ss.t', '[ss.t[-1]]'], {}), '(ss.t, [ss.t[-1]])\n', (726, 744), True, 'import numpy as np\n'), ((760, 784), 'numpy.append', 'np.append', (['ss.a', '[a_ref]'], {}), '(ss.a, [a_ref])\n', (769, 784), True, 'import numpy as np\n'), ((2058, 2100), 'numpy.concatenate', 'np.concatenate', (['(p02_lo.t, p02_hi.t[::-1])'], {}), '((p02_lo.t, p02_hi.t[::-1]))\n', (2072, 2100), True, 'import numpy as np\n'), ((2120, 2162), 'numpy.concatenate', 'np.concatenate', (['(p02_lo.a, p02_hi.a[::-1])'], {}), '((p02_lo.a, p02_hi.a[::-1]))\n', (2134, 2162), True, 'import numpy as np\n'), ((2310, 2352), 'numpy.concatenate', 'np.concatenate', (['(p03_lo.t, p03_hi.t[::-1])'], {}), '((p03_lo.t, p03_hi.t[::-1]))\n', (2324, 2352), True, 'import numpy as np\n'), ((2372, 2414), 'numpy.concatenate', 'np.concatenate', (['(p03_lo.a, p03_hi.a[::-1])'], {}), '((p03_lo.a, p03_hi.a[::-1]))\n', (2386, 2414), True, 'import numpy as np\n'), ((3217, 3261), 'numpy.concatenate', 'np.concatenate', (['(p02_min.t, p02_max.t[::-1])'], {}), '((p02_min.t, p02_max.t[::-1]))\n', (3231, 3261), True, 'import numpy as np\n'), ((3281, 3325), 'numpy.concatenate', 'np.concatenate', (['(p02_min.a, p02_max.a[::-1])'], {}), '((p02_min.a, p02_max.a[::-1]))\n', (3295, 3325), True, 'import numpy as np\n'), ((3473, 3517), 'numpy.concatenate', 'np.concatenate', (['(p03_min.t, p03_max.t[::-1])'], {}), '((p03_min.t, p03_max.t[::-1]))\n', (3487, 3517), True, 'import numpy as np\n'), ((3537, 3581), 'numpy.concatenate', 'np.concatenate', (['(p03_min.a, p03_max.a[::-1])'], {}), '((p03_min.a, p03_max.a[::-1]))\n', (3551, 3581), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import os
import logging
from keras import backend as K
from keras.models import load_model
from sklearn.metrics import classification_report
from keras.callbacks import ModelCheckpoint
from keras.models import Model
import tensorflow as tf
import random as rn
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.utils import to_categorical
from keras.utils import plot_model
from keras import optimizers
from models import *
from utils import *
import gc
from keras.callbacks import ReduceLROnPlateau
from sklearn import metrics
import DataGenerator_all
import argparse
from encodes import *
# ---- Command-line interface ----
parser = argparse.ArgumentParser(description='TTSS_classifier')
parser.add_argument('--encode', type=str, default='onehot') # onehot or word2vec or embed(1,2..)
parser.add_argument('--epoch', type=int, default=800)
parser.add_argument('--nbt', type=int, default=128)  # batch size
parser.add_argument('--opt', type=str, default='adam')  # NOTE(review): parsed but Adam is hard-coded below — confirm
parser.add_argument('--feature', type=str, default='all') # aac, dpc, ctd, pseaac1, pseaac2, all
parser.add_argument('--file', type=str, default='VFG-564')  # dataset name prefix under data/
parser.add_argument('--signal', type=int, default=13) # 13, 23, 33, 43, 53
parser.add_argument('--gpuid', type=int, default=2)
parser.add_argument('--pretrain_method', type=str, default="fixnodense")  # cnnonly / fixnodense / fixall
parser.add_argument('--split_cutoff', type=float, default=0.4)  # test-set fraction for train_test_split
parser.add_argument('--lr', type=float, default=0.0001)
args = parser.parse_args()
# ---- Logging and GPU/session environment (must be set before the TF session is created) ----
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpuid)  # pin the process to a single GPU
os.environ['PYTHONHASHSEED'] = '0'  # reproducible Python hashing
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ logging
# Seed the numpy and Python RNGs; the TF graph is seeded separately below.
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(allow_soft_placement=True)  # TF1-style session configuration
session_conf.gpu_options.allow_growth = True  # allocate GPU memory on demand
# session_conf.gpu_options.per_process_gpu_memory_fraction = 0.3
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)  # make this session the Keras backend session
# parameters
max_sequence_length = 2500 # 5000
input_dim = 20  # presumably one row per standard amino acid — TODO confirm against the encoder
data_dir = os.getcwd() + "/data/"
features_dir = data_dir + args.file + "_features/"
# Raw protein sequences and their labels ('.npz' archives with 'data'/'labels' entries).
all_data = np.load(data_dir + str(args.file) + "_seq.npz", allow_pickle=True)['data']
all_labels = np.load(data_dir + str(args.file) + "_seq.npz", allow_pickle=True)['labels']
class_name_dir = data_dir + args.file + "_class_name"
class_name = load_class_name(class_name_dir)
# Stratified split; random_state reuses --signal so the feature split below is aligned.
X_train, X_test, Y_train, Y_test = train_test_split(all_data, all_labels, test_size=args.split_cutoff, stratify=all_labels, random_state=args.signal)
# class_weights = compute_class_weight('balanced', np.unique(Y_train), Y_train) # list
if args.feature == 'all':
    # Concatenate all hand-crafted feature sets column-wise (aac + dpc + ctd + pseaac1 + pseaac2).
    feature_data = np.load(features_dir + "aac_ml.npz", allow_pickle=True)['data']
    feature_label = np.load(features_dir + "aac_ml.npz", allow_pickle=True)['labels']
    dpc_data = np.load(features_dir + "dpc_ml.npz", allow_pickle=True)['data']
    feature_data = np.concatenate((feature_data, dpc_data), axis=1)
    ctd_data = np.load(features_dir + "ctd_ml.npz", allow_pickle=True)['data']
    feature_data = np.concatenate((feature_data, ctd_data), axis=1)
    pseaac1_data = np.load(features_dir + "pseaac1_ml.npz", allow_pickle=True)['data']
    feature_data = np.concatenate((feature_data, pseaac1_data), axis=1)
    pseaac2_data = np.load(features_dir + "pseaac2_ml.npz", allow_pickle=True)['data']
    feature_data = np.concatenate((feature_data, pseaac2_data), axis=1)
else:
    # feature_data
    feature_data = np.load(features_dir + args.feature + "_ml.npz", allow_pickle=True)['data']
    feature_label = np.load(features_dir + args.feature + "_ml.npz", allow_pickle=True)['labels']
# NOTE(review): on Python 3 map() returns a lazy iterator, so the index
# accesses below (feature_label[i]) would fail; this line relies on Python 2
# list semantics — confirm the intended interpreter version.
feature_label = map(int, feature_label)
# Scale each feature column into [0, 1].
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
feature_data = min_max_scaler.fit_transform(feature_data)
X_train_feature, X_test_feature, Y_train_feature, Y_test_feature = train_test_split(feature_data, feature_label, test_size=args.split_cutoff,
                                                                                    stratify=feature_label,
                                                                                    random_state=args.signal)
max_sequence_length_feature = X_train_feature.shape[1]
# check whether feature_label is same with all_labels, whether labels are same after train_test_split.==> answer is yes.
# NOTE(review): a non-empty list comprehension is always truthy, so both
# checks below take the "same" branch whenever the data is non-empty,
# regardless of the element-wise comparison results — confirm intent.
if [all_labels[i] == feature_label[i] for i in range(len(all_labels))]:
    print("all_labels is same with feature_label\n ")
else:
    print("all_labels is different with feature_label\n")
if [Y_train[i] == Y_train_feature[i] for i in range(len(Y_train))]:
    print("Y_train is same with Y_train_feature\n ")
else:
    print("Y_train is different with Y_train_feature\n")
# dataloader
# Parameters
params = {'batch_size': args.nbt,
          'n_classes': len(class_name),
          'encode': args.encode,
          }
# Generators
training_generator = DataGenerator_all.DataGenerator2inputs(X_train, Y_train, feature_data=X_train_feature, feature_label=Y_train_feature, **params)
# Load the pretrained VFNet-H model to transfer from.
# NOTE(review): os.getcwd() is concatenated without a trailing '/', so the
# path only resolves if the record directory name is meant to be appended to
# the cwd's final component — confirm this path is correct.
pretrained_model = load_model(os.getcwd() + "COG-755_record/VFNet-H/VFNet-H_seed13_" + args.feature + "512_bestmodel")
if args.pretrain_method == "cnnonly":
    # only take concatenate layer
    x = pretrained_model.layers[-4].output
    x = Dense(len(class_name), activation='softmax', name='prediction', trainable=True)(x)
    model = Model(inputs=pretrained_model.inputs, output=x)
    # Freeze everything except the new softmax head.
    for layer in model.layers[:-1]:
        layer.trainable = False
# fix non dense
elif args.pretrain_method == "fixnodense":
    x = pretrained_model.layers[-2].output
    x = Dense(len(class_name), activation='softmax', name='prediction', trainable=True)(x)
    model = Model(inputs=pretrained_model.inputs, output=x)
    # Freeze all but the last three layers, keeping the dense block trainable.
    for layer in model.layers[:-3]:
        layer.trainable = False
elif args.pretrain_method == "fixall":
    # fix all
    x = pretrained_model.layers[-2].output
    x = Dense(len(class_name), activation='softmax', name='prediction', trainable=True)(x)
    model = Model(inputs=pretrained_model.inputs, output=x)
    # Freeze everything except the new softmax head.
    for layer in model.layers[:-1]:
        layer.trainable = False
print(model.summary())
# NOTE(review): args.opt is parsed on the CLI but Adam is hard-coded here.
adam = optimizers.Adam(lr=args.lr)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# Run signature used to name the log file, checkpoints and model plot.
sig = "VFNet-H_TL_COG-755_seed" + str(args.signal) + "_" + args.feature + "_bt" + str(args.nbt) + \
      args.pretrain_method + "_lr" + str(args.lr) + "_split" + str(args.split_cutoff)
train_info_record = data_dir + args.file + "_record/VFNet-H_TL/"
if not os.path.exists(train_info_record):
    os.makedirs(train_info_record)
f = open(train_info_record + sig + '.txt', 'a')  # results log; closed at the end of the script
VFs_model_dir = train_info_record + sig + '_bestmodel'
plot_model(model, to_file=train_info_record + sig + '_model.png', show_shapes=True, show_layer_names=False)
save_point = 100  # save a checkpoint every 100 epochs
# Train one epoch at a time so a snapshot can be saved every `save_point` epochs.
for epoch_idx in range(1, args.epoch + 1):
    print("Epoch: {}\n".format(epoch_idx))
    history = model.fit_generator(generator=training_generator, epochs=1, verbose=2)
    if epoch_idx % save_point == 0:
        best_model_save_dir = VFs_model_dir + str(int(epoch_idx/save_point))
        model.save(best_model_save_dir)
    del history
    gc.collect()  # release per-epoch training state
# test
# One-hot encode the held-out sequences once; every checkpoint is evaluated on them.
X_test = np.array(onehot_encode(X_test))
# Track the checkpoint with the best macro F1 on the test split.
best_model = 0
best_acc = 0
best_test_cla_report = {}
best_recall = 0
best_precision = 0
best_f1_score = 0
best_recall2 = 0
best_precision2 = 0
best_f1_score2 = 0
# Evaluate every saved checkpoint (one per `save_point` epochs).
for j in range(1, int(args.epoch/save_point) + 1):
    trained_model_dir = VFs_model_dir + str(j)
    t_model = load_model(trained_model_dir)
    test_pred = t_model.predict([X_test, X_test_feature], batch_size=args.nbt, verbose=0)
    test_pred_a = test_pred.tolist()
    # argmax over the softmax outputs -> predicted class index per sample
    test_pred_labels = [i.index(max(i)) for i in test_pred_a]
    test_y_labels = Y_test.tolist()
    # Model selection criterion: macro-averaged F1.
    f1_score_value_2 = metrics.f1_score(test_y_labels, test_pred_labels, average='macro')
    if f1_score_value_2 > best_f1_score2:
        # New best checkpoint: recompute the full set of reported metrics.
        best_f1_score2 = f1_score_value_2
        best_model = j
        best_test_cla_report = classification_report(test_y_labels, test_pred_labels, target_names=list(class_name))
        best_acc = metrics.accuracy_score(test_y_labels, test_pred_labels)
        best_recall = metrics.recall_score(test_y_labels, test_pred_labels, average='micro')
        best_precision = metrics.precision_score(test_y_labels, test_pred_labels, average='micro')
        best_f1_score = metrics.f1_score(test_y_labels, test_pred_labels, average='micro')
        best_recall2 = metrics.recall_score(test_y_labels, test_pred_labels, average='macro')
        best_precision2 = metrics.precision_score(test_y_labels, test_pred_labels, average='macro')
# Persist the metrics of the best checkpoint to the run's log file.
f.write('best model idx: {}\nTest_acc is: {:.4f}\nClassfication report:\n{}\n'.format(best_model, best_acc, best_test_cla_report))
f.write('Micro\nprecision is: {:.4f}\nRecall is: {:.4f}\nF1_score is: {:.4f}\n'.format(best_precision, best_recall, best_f1_score))
f.write('Macro\nprecision is: {:.4f}\nRecall is: {:.4f}\nF1_score is: {:.4f}\n'.format(best_precision2, best_recall2, best_f1_score2))
print(best_f1_score2)
f.close()
print("finish ")
| [
"keras.models.load_model",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.MinMaxScaler",
"keras.models.Model",
"tensorflow.ConfigProto",
"gc.collect",
"sklearn.metrics.f1_score",
... | [((718, 772), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TTSS_classifier"""'}), "(description='TTSS_classifier')\n", (741, 772), False, 'import argparse\n'), ((1553, 1648), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (1572, 1648), False, 'import logging\n'), ((1826, 1844), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1840, 1844), True, 'import numpy as np\n'), ((1846, 1860), 'random.seed', 'rn.seed', (['(12345)'], {}), '(12345)\n', (1853, 1860), True, 'import random as rn\n'), ((1877, 1918), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1891, 1918), True, 'import tensorflow as tf\n'), ((2032, 2056), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (2050, 2056), True, 'import tensorflow as tf\n'), ((2128, 2147), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (2141, 2147), True, 'from keras import backend as K\n'), ((2618, 2736), 'sklearn.model_selection.train_test_split', 'train_test_split', (['all_data', 'all_labels'], {'test_size': 'args.split_cutoff', 'stratify': 'all_labels', 'random_state': 'args.signal'}), '(all_data, all_labels, test_size=args.split_cutoff,\n stratify=all_labels, random_state=args.signal)\n', (2634, 2736), False, 'from sklearn.model_selection import train_test_split\n'), ((3926, 3974), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3952, 3974), False, 'from sklearn import preprocessing\n'), ((4106, 4234), 'sklearn.model_selection.train_test_split', 'train_test_split', (['feature_data', 'feature_label'], {'test_size': 'args.split_cutoff', 'stratify': 'feature_label', 'random_state': 
'args.signal'}), '(feature_data, feature_label, test_size=args.split_cutoff,\n stratify=feature_label, random_state=args.signal)\n', (4122, 4234), False, 'from sklearn.model_selection import train_test_split\n'), ((5105, 5237), 'DataGenerator_all.DataGenerator2inputs', 'DataGenerator_all.DataGenerator2inputs', (['X_train', 'Y_train'], {'feature_data': 'X_train_feature', 'feature_label': 'Y_train_feature'}), '(X_train, Y_train, feature_data=\n X_train_feature, feature_label=Y_train_feature, **params)\n', (5143, 5237), False, 'import DataGenerator_all\n'), ((6384, 6411), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'args.lr'}), '(lr=args.lr)\n', (6399, 6411), False, 'from keras import optimizers\n'), ((6941, 7052), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': "(train_info_record + sig + '_model.png')", 'show_shapes': '(True)', 'show_layer_names': '(False)'}), "(model, to_file=train_info_record + sig + '_model.png',\n show_shapes=True, show_layer_names=False)\n", (6951, 7052), False, 'from keras.utils import plot_model\n'), ((2228, 2239), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2237, 2239), False, 'import os\n'), ((3123, 3171), 'numpy.concatenate', 'np.concatenate', (['(feature_data, dpc_data)'], {'axis': '(1)'}), '((feature_data, dpc_data), axis=1)\n', (3137, 3171), True, 'import numpy as np\n'), ((3272, 3320), 'numpy.concatenate', 'np.concatenate', (['(feature_data, ctd_data)'], {'axis': '(1)'}), '((feature_data, ctd_data), axis=1)\n', (3286, 3320), True, 'import numpy as np\n'), ((3429, 3481), 'numpy.concatenate', 'np.concatenate', (['(feature_data, pseaac1_data)'], {'axis': '(1)'}), '((feature_data, pseaac1_data), axis=1)\n', (3443, 3481), True, 'import numpy as np\n'), ((3590, 3642), 'numpy.concatenate', 'np.concatenate', (['(feature_data, pseaac2_data)'], {'axis': '(1)'}), '((feature_data, pseaac2_data), axis=1)\n', (3604, 3642), True, 'import numpy as np\n'), ((5578, 5625), 'keras.models.Model', 'Model', ([], {'inputs': 
'pretrained_model.inputs', 'output': 'x'}), '(inputs=pretrained_model.inputs, output=x)\n', (5583, 5625), False, 'from keras.models import Model\n'), ((6762, 6795), 'os.path.exists', 'os.path.exists', (['train_info_record'], {}), '(train_info_record)\n', (6776, 6795), False, 'import os\n'), ((6802, 6832), 'os.makedirs', 'os.makedirs', (['train_info_record'], {}), '(train_info_record)\n', (6813, 6832), False, 'import os\n'), ((7419, 7431), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7429, 7431), False, 'import gc\n'), ((7771, 7800), 'keras.models.load_model', 'load_model', (['trained_model_dir'], {}), '(trained_model_dir)\n', (7781, 7800), False, 'from keras.models import load_model\n'), ((8054, 8120), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['test_y_labels', 'test_pred_labels'], {'average': '"""macro"""'}), "(test_y_labels, test_pred_labels, average='macro')\n", (8070, 8120), False, 'from sklearn import metrics\n'), ((2082, 2104), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2102, 2104), True, 'import tensorflow as tf\n'), ((2872, 2927), 'numpy.load', 'np.load', (["(features_dir + 'aac_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'aac_ml.npz', allow_pickle=True)\n", (2879, 2927), True, 'import numpy as np\n'), ((2957, 3012), 'numpy.load', 'np.load', (["(features_dir + 'aac_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'aac_ml.npz', allow_pickle=True)\n", (2964, 3012), True, 'import numpy as np\n'), ((3039, 3094), 'numpy.load', 'np.load', (["(features_dir + 'dpc_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'dpc_ml.npz', allow_pickle=True)\n", (3046, 3094), True, 'import numpy as np\n'), ((3188, 3243), 'numpy.load', 'np.load', (["(features_dir + 'ctd_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'ctd_ml.npz', allow_pickle=True)\n", (3195, 3243), True, 'import numpy as np\n'), ((3341, 3400), 'numpy.load', 'np.load', (["(features_dir + 'pseaac1_ml.npz')"], {'allow_pickle': 
'(True)'}), "(features_dir + 'pseaac1_ml.npz', allow_pickle=True)\n", (3348, 3400), True, 'import numpy as np\n'), ((3502, 3561), 'numpy.load', 'np.load', (["(features_dir + 'pseaac2_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'pseaac2_ml.npz', allow_pickle=True)\n", (3509, 3561), True, 'import numpy as np\n'), ((3690, 3757), 'numpy.load', 'np.load', (["(features_dir + args.feature + '_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + args.feature + '_ml.npz', allow_pickle=True)\n", (3697, 3757), True, 'import numpy as np\n'), ((3787, 3854), 'numpy.load', 'np.load', (["(features_dir + args.feature + '_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + args.feature + '_ml.npz', allow_pickle=True)\n", (3794, 3854), True, 'import numpy as np\n'), ((5908, 5955), 'keras.models.Model', 'Model', ([], {'inputs': 'pretrained_model.inputs', 'output': 'x'}), '(inputs=pretrained_model.inputs, output=x)\n', (5913, 5955), False, 'from keras.models import Model\n'), ((8369, 8424), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_y_labels', 'test_pred_labels'], {}), '(test_y_labels, test_pred_labels)\n', (8391, 8424), False, 'from sklearn import metrics\n'), ((8448, 8518), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['test_y_labels', 'test_pred_labels'], {'average': '"""micro"""'}), "(test_y_labels, test_pred_labels, average='micro')\n", (8468, 8518), False, 'from sklearn import metrics\n'), ((8545, 8618), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['test_y_labels', 'test_pred_labels'], {'average': '"""micro"""'}), "(test_y_labels, test_pred_labels, average='micro')\n", (8568, 8618), False, 'from sklearn import metrics\n'), ((8644, 8710), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['test_y_labels', 'test_pred_labels'], {'average': '"""micro"""'}), "(test_y_labels, test_pred_labels, average='micro')\n", (8660, 8710), False, 'from sklearn import metrics\n'), ((8735, 8805), 
'sklearn.metrics.recall_score', 'metrics.recall_score', (['test_y_labels', 'test_pred_labels'], {'average': '"""macro"""'}), "(test_y_labels, test_pred_labels, average='macro')\n", (8755, 8805), False, 'from sklearn import metrics\n'), ((8833, 8906), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['test_y_labels', 'test_pred_labels'], {'average': '"""macro"""'}), "(test_y_labels, test_pred_labels, average='macro')\n", (8856, 8906), False, 'from sklearn import metrics\n'), ((6232, 6279), 'keras.models.Model', 'Model', ([], {'inputs': 'pretrained_model.inputs', 'output': 'x'}), '(inputs=pretrained_model.inputs, output=x)\n', (6237, 6279), False, 'from keras.models import Model\n'), ((5264, 5275), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5273, 5275), False, 'import os\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
import os
import pandas as pd
from covid_xprize.standard_predictor.xprize_predictor import XPrizePredictor
import time
SEED = 0  # default RNG seed for prescriptors
DEFAULT_TEST_COST = 'covid_xprize/validation/data/uniform_random_costs.csv'
# (name, config) evaluation scenarios; the config keys are the keyword
# arguments of gen_test_config below.
TEST_CONFIGS = [
    # ('Default', {'start_date': '2020-08-01', 'end_date': '2020-08-05', 'costs': DEFAULT_TEST_COST}),
    # ('Jan_Mar_EC_fast', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'equal', 'selected_geos': ['Canada', 'United States', 'United States / Texas']}),
    # ('Jan_Mar_RC_fast', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random', 'selected_geos': ['Canada', 'United States', 'United States / Texas']}),
    ('EQUAL', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'equal'}),
    ('RANDOM1', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM2', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM3', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM4', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM5', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM6', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM7', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM8', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM9', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('RANDOM10', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
    ('Jan_RC_NoDec_fast', {'start_date': '2021-01-01', 'end_date': '2021-01-31', 'train_end_date': '2020-11-30', 'costs': 'random', 'selected_geos': ['Canada', 'United States', 'United States / Texas']}),
]
# File-system layout for the Oxford NPI data and auxiliary demographics.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(ROOT_DIR, os.pardir, 'data')
OXFORD_FILEPATH = os.path.join(DATA_DIR, 'OxCGRT_latest.csv')  # local cache of the Oxford data
OXFORD_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
ADDITIONAL_CONTEXT_FILE = os.path.join(DATA_DIR, "Additional_Context_Data_Global.csv")  # country populations
ADDITIONAL_US_STATES_CONTEXT = os.path.join(DATA_DIR, "US_states_populations.csv")
ADDITIONAL_UK_CONTEXT = os.path.join(DATA_DIR, "uk_populations.csv")
US_PREFIX = "United States / "  # GeoID prefix for US states
COUNTRY_LIST = os.path.join(DATA_DIR, 'countries_regions.txt')
PREDICTOR_PATH = 'covid_xprize/standard_predictor/models/trained_model_weights.h5'
# Columns kept when working with contextual (non-NPI) data.
CONTEXT_COLUMNS = ['CountryName',
                   'RegionName',
                   'GeoID',
                   'Date',
                   'ConfirmedCases',
                   'ConfirmedDeaths',
                   'Population']
# Maximum stringency value for each intervention in the Oxford dataset.
NPI_MAX_VALUES = {
    'C1_School closing': 3,
    'C2_Workplace closing': 3,
    'C3_Cancel public events': 2,
    'C4_Restrictions on gatherings': 4,
    'C5_Close public transport': 2,
    'C6_Stay at home requirements': 3,
    'C7_Restrictions on internal movement': 2,
    'C8_International travel controls': 4,
    'H1_Public information campaigns': 2,
    'H2_Testing policy': 3,
    'H3_Contact tracing': 2,
    'H6_Facial Coverings': 4
}
NPI_COLUMNS = list(NPI_MAX_VALUES.keys())
CASES_COL = ['NewCases']
PRED_CASES_COL = ['PredictedDailyNewCases']
def gen_test_config(start_date=None,
                    end_date=None,
                    train_start_date=None,
                    train_end_date=None,
                    costs='random',
                    selected_geos=COUNTRY_LIST,
                    predictor=None,
                    update_data=False):
    """
    Loads the data and splits it into train and test sets
    Args:
        start_date: first date to prescribe for
        end_date: last date to prescribe for
        train_start_date: first date in the returned train_df
        train_end_date: last date in the returned train_df
        costs: 'random' / 'equal' / path to csv file with costs
        selected_geos: geos to prescribe for (list / path to csv file)
        predictor: the predictor model used by the prescriptor
        update_data: boolean for whether to re-download the Oxford data
    Returns: (train_df, test_df, cost_df)
    """
    assert (start_date is not None) and (end_date is not None)
    df = load_historical_data(update_data=update_data)
    # Test dataframe consists of NPI values up to start_date-1
    pd_start_date = pd.to_datetime(start_date)
    test_df = df[df['Date'] < pd_start_date].copy()
    test_columns = ['GeoID', 'CountryName', 'RegionName', 'Date'] + NPI_COLUMNS
    test_df = test_df[test_columns]
    # Costs: read from a csv, or generate per-geo 'equal'/'random' costs.
    if costs not in ['equal', 'random']:
        cost_df = pd.read_csv(costs)
    else:
        cost_df = generate_costs(test_df, mode=costs)
    cost_df = add_geo_id(cost_df)
    # Discard countries that will not be evaluated
    if isinstance(selected_geos, str): # selected_geos can be a path to a csv
        # NOTE(review): error_bad_lines was deprecated in pandas 1.3 in
        # favour of on_bad_lines — confirm the pinned pandas version.
        country_df = pd.read_csv(selected_geos,
                                 encoding="ISO-8859-1",
                                 dtype={'RegionName': str},
                                 error_bad_lines=False)
        country_df['RegionName'] = country_df['RegionName'].replace('', np.nan)
        # GeoID is 'Country' or 'Country / Region' (same scheme as add_geo_id).
        country_df['GeoID'] = np.where(country_df['RegionName'].isnull(),
                                       country_df['CountryName'],
                                       country_df['CountryName'] + ' / ' + country_df['RegionName'])
    else: # selected_geos can also be a list of GeoIDs
        country_df = pd.DataFrame.from_dict({'GeoID': selected_geos})
    test_df = test_df[test_df['GeoID'].isin(country_df['GeoID'].unique())]
    cost_df = cost_df[cost_df['GeoID'].isin(country_df['GeoID'].unique())]
    # forget all historical data starting from start_date
    train_df = df[df['Date'] < pd_start_date]
    if predictor is not None:
        # Keep the predictor's internal data consistent with the train window.
        predictor.df = predictor.df[predictor.df['Date'] < pd_start_date]
    if train_start_date is not None:
        # forget all historical data before train_start_date
        pd_train_start_date = pd.to_datetime(train_start_date)
        train_df = train_df[pd_train_start_date <= df['Date']]
        if predictor is not None:
            predictor.df = predictor.df[pd_train_start_date <= predictor.df['Date']]
    if train_end_date is not None:
        # forget all historical data after train_end_date
        pd_train_end_date = pd.to_datetime(train_end_date)
        train_df = train_df[train_df['Date'] <= pd_train_end_date]
        if predictor is not None:
            predictor.df = predictor.df[predictor.df['Date'] <= pd_train_end_date]
    return train_df, test_df, cost_df
def load_historical_data(update_data=False):
    """Load the Oxford NPI dataset (downloading it if requested), merge in
    population data, and derive the per-geo epidemiological columns used by
    the prescriptors (smoothed cases/deaths, ratios, PredictionRatio)."""
    if update_data:
        print('Updating Oxford data...', end=' ')
        # NOTE(review): error_bad_lines was deprecated in pandas 1.3 in
        # favour of on_bad_lines — confirm the pinned pandas version.
        df = pd.read_csv(OXFORD_URL,
                         parse_dates=['Date'],
                         encoding="ISO-8859-1",
                         dtype={'RegionName': str,
                                'RegionCode': str},
                         error_bad_lines=False)
        df.to_csv(OXFORD_FILEPATH)
        print('DONE')
    else:
        df = pd.read_csv(OXFORD_FILEPATH,
                         parse_dates=['Date'],
                         encoding="ISO-8859-1",
                         dtype={'RegionName': str,
                                'RegionCode': str},
                         error_bad_lines=False)
        # NOTE(review): df.Date[len(df) - 1] assumes a default RangeIndex.
        print('Using existing data up to date {}'.format(str(df.Date[len(df) - 1]).split()[0]))
    df = add_geo_id(df)
    # Load dataframe with demographics about each country
    context_df = load_additional_context_df()
    # Merge the two dataframes
    df = df.merge(context_df, on=['GeoID'], how='left', suffixes=('', '_y'))
    # Drop countries with no population data
    df.dropna(subset=['Population'], inplace=True)
    # Fill in missing values
    fill_missing_values(df, dropifnocases=True, dropifnodeaths=False)
    # Compute number of new cases and deaths each day
    df['NewCases'] = df.groupby('GeoID').ConfirmedCases.diff().fillna(0)
    df['NewDeaths'] = df.groupby('GeoID').ConfirmedDeaths.diff().fillna(0)
    # Replace negative values (which do not make sense for these columns) with 0
    df['NewCases'] = df['NewCases'].clip(lower=0)
    df['NewDeaths'] = df['NewDeaths'].clip(lower=0)
    # Compute smoothed versions of new cases and deaths each day
    window_size = 7
    df['SmoothNewCases'] = df.groupby('GeoID')['NewCases'].rolling(
        window_size, center=False).mean().fillna(0).reset_index(0, drop=True)
    df['SmoothNewDeaths'] = df.groupby('GeoID')['NewDeaths'].rolling(
        window_size, center=False).mean().fillna(0).reset_index(0, drop=True)
    # Compute percent change in new cases and deaths each day
    df['CaseRatio'] = df.groupby('GeoID').SmoothNewCases.pct_change(
    ).fillna(0).replace(np.inf, 0) + 1
    df['DeathRatio'] = df.groupby('GeoID').SmoothNewDeaths.pct_change(
    ).fillna(0).replace(np.inf, 0) + 1
    # Add column for proportion of population infected
    df['ProportionInfected'] = df['ConfirmedCases'] / df['Population']
    # Create column of value to predict
    df['PredictionRatio'] = df['CaseRatio'] / (1 - df['ProportionInfected'])
    return df
def add_geo_id(df):
    """Attach a 'GeoID' column to *df* in place and return it.

    The GeoID is the country name, or 'Country / Region' when a region
    name is present. Empty region names are normalised to NaN first.
    """
    region = df['RegionName'].replace('', np.nan)
    df['RegionName'] = region
    combined = df['CountryName'] + ' / ' + region
    # Fall back to the plain country name wherever no region is given.
    df['GeoID'] = combined.where(region.notnull(), df['CountryName'])
    return df
def load_additional_context_df():
    """Load per-geo population data into a single dataframe.

    Combines three sources keyed by 'GeoID':
      - global country populations (ADDITIONAL_CONTEXT_FILE),
      - US state populations (ADDITIONAL_US_STATES_CONTEXT), prefixed with
        US_PREFIX to match the GeoID scheme,
      - UK regional populations (ADDITIONAL_UK_CONTEXT).

    Returns:
        pd.DataFrame with at least 'GeoID' and 'Population' columns.
    """
    # File containing the population for each country
    # Note: this file contains only countries population, not regions
    additional_context_df = pd.read_csv(ADDITIONAL_CONTEXT_FILE,
                                        usecols=['CountryName', 'Population'])
    additional_context_df['GeoID'] = additional_context_df['CountryName']
    # US states population
    additional_us_states_df = pd.read_csv(ADDITIONAL_US_STATES_CONTEXT,
                                          usecols=['NAME', 'POPESTIMATE2019'])
    # Rename the columns to match measures_df ones
    additional_us_states_df.rename(columns={'POPESTIMATE2019': 'Population'}, inplace=True)
    # Prefix with country name to match measures_df
    additional_us_states_df['GeoID'] = US_PREFIX + additional_us_states_df['NAME']
    # UK population
    additional_uk_df = pd.read_csv(ADDITIONAL_UK_CONTEXT)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent (same default ignore_index=False).
    additional_context_df = pd.concat(
        [additional_context_df, additional_us_states_df, additional_uk_df])
    return additional_context_df
def fill_missing_values(df, dropifnocases=True, dropifnodeaths=False):
df.update(df.groupby('GeoID').ConfirmedCases.apply(
lambda group: group.interpolate(limit_area='inside')))
df.update(df.groupby('GeoID').ConfirmedDeaths.apply(
lambda group: group.interpolate(limit_area='inside')))
if dropifnocases:
# Drop country / regions for which no number of cases is available
df.dropna(subset=['ConfirmedCases'], inplace=True)
if dropifnodeaths:
# Drop country / regions for which no number of deaths is available
df.dropna(subset=['ConfirmedDeaths'], inplace=True)
# if NPI value is not available, set it to 0
for npi_column in NPI_COLUMNS:
df.update(df.groupby('GeoID')[npi_column].ffill().fillna(0))
def generate_costs(df, mode='random'):
    """Build a per-geo cost table for the NPIs.

    Each geo's costs always sum to len(NPI_COLUMNS).
    Supported modes:
      - 'equal': every NPI costs 1 for every geo.
      - 'random': costs sampled uniformly per geo, then normalised so
        they sum to the number of NPIs.
    """
    assert mode in ['equal', 'random'], \
        f'Unsupported mode {mode}'
    # Collapse to one row per geo, keeping only the identifying columns.
    geo_df = df.groupby(['CountryName', 'RegionName'], dropna=False).mean().reset_index()
    geo_df = geo_df[['CountryName', 'RegionName']]
    if mode == 'equal':
        geo_df[NPI_COLUMNS] = 1
    elif mode == 'random':
        n_geos = len(geo_df)
        n_ips = len(NPI_COLUMNS)
        raw = np.random.uniform(size=(n_ips, n_geos))
        # Normalise each geo's column of samples so its costs sum to n_ips.
        geo_df[NPI_COLUMNS] = (n_ips * raw / raw.sum(axis=0)).T
    return geo_df
def weight_prescriptions_by_cost(pres_df, cost_df):
    """
    Weight prescriptions by their costs.

    Merges prescriptions with per-geo costs and multiplies each NPI column
    by its cost; the weighted values land back in the plain NPI column names.
    """
    merged = pres_df.merge(cost_df, how='outer',
                           on=['CountryName', 'RegionName'],
                           suffixes=('_pres', '_cost'))
    for npi_col in NPI_COLUMNS:
        merged[npi_col] = merged[npi_col + '_pres'] * merged[npi_col + '_cost']
    return merged
class BasePrescriptorMeta(ABCMeta):
    """
    Metaclass that, on top of the usual ABCMeta checks, verifies after
    instantiation that every attribute listed in `abstract_attributes`
    was actually set on the new instance.
    """
    abstract_attributes = []
    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)
        # Collect every declared abstract attribute the constructor forgot to set.
        missing = [name for name in instance.abstract_attributes
                   if not hasattr(instance, name)]
        if len(missing) == 1:
            raise TypeError(
                "Can't instantiate abstract class {} with abstract attribute '{}'. "
                "You must set self.{} in the constructor.".format(
                    cls.__name__, missing[0], missing[0]
                )
            )
        elif len(missing) > 1:
            raise TypeError(
                "Can't instantiate abstract class {} with abstract attributes {}. "
                "For example, you must set self.{} in the constructor.".format(
                    cls.__name__, missing, missing[0]
                )
            )
        return instance
class BasePrescriptor(object, metaclass=BasePrescriptorMeta):
    """
    Abstract class for prescriptors. Currently provides evaluation for classes that inherit from this class.
    Requires that subclasses implement 2 methods:
    fit(hist_df: pd.DataFrame) - train the model using the standard predictor and some historical real data
    prescribe(start_date_str: str,
              end_date_str: str,
              prior_ips_df: pd.DataFrame
              cost_df: pd.DataFrame) -> pd.DataFrame - make prescriptions for the given period
    The following attribute is set on the initialization of this class and should NOT be modified:
    predictor - standard predictor model
    """
    # Enforced by BasePrescriptorMeta: self.predictor must exist after __init__.
    abstract_attributes = ['predictor']
    def __init__(self, seed=SEED):
        # seed=None leaves the global numpy RNG state untouched.
        if seed is not None:
            self.set_seed(seed)
        self.predictor = XPrizePredictor(PREDICTOR_PATH, OXFORD_FILEPATH)
    @abstractmethod
    def fit(self, hist_df):
        # Train the prescriptor on historical data; implemented by subclasses.
        pass
    @abstractmethod
    def prescribe(self, start_date_str, end_date_str, prior_ips_df, cost_df):
        # Produce a prescriptions DataFrame for the period; implemented by subclasses.
        pass
    def evaluate(self, output_file_path=None, fit=True, prescribe=True, verbose=True):
        """Run every scenario in TEST_CONFIGS: optionally fit, then prescribe,
        predict the resulting cases, and aggregate (Stringency, PredictedDailyNewCases)
        per geo and PrescriptionIndex.

        Args:
            output_file_path: if not None, concatenated results of all tests are saved there as csv.
            fit: whether to (re)train the prescriptor on each test's train split.
            prescribe: if False, only fitting is performed for each test.
            verbose: print progress and per-test summaries.
        """
        all_tests_df = []
        for test_name, test_config in TEST_CONFIGS:
            if verbose:
                print('Running test:', test_name)
            # reinitialize the predictor because it is modified inside the loop by gen_test_config
            self.predictor = XPrizePredictor(PREDICTOR_PATH, OXFORD_FILEPATH)
            # generate the test config
            train_df, test_df, cost_df = gen_test_config(predictor=self.predictor, **test_config)
            start_date, end_date = test_config['start_date'], test_config['end_date']
            # train the model
            if fit:
                if verbose:
                    print('...training the prescriptor model')
                self.fit(train_df)
            if not prescribe:
                continue
            # generate prescriptions
            if verbose:
                print('...generating prescriptions')
            start_time = time.time()
            pres_df = self.prescribe(start_date_str=start_date,
                                     end_date_str=end_date,
                                     prior_ips_df=test_df,
                                     cost_df=cost_df)
            if verbose:
                print('...prescriptions took {} seconds to be generated'.format(round(time.time() - start_time, 2)))
            # check if all required columns are in the returned dataframe
            assert 'Date' in pres_df.columns
            assert 'CountryName' in pres_df.columns
            assert 'RegionName' in pres_df.columns
            assert 'PrescriptionIndex' in pres_df.columns
            for npi_col in NPI_COLUMNS:
                assert npi_col in pres_df.columns
            # generate predictions with the given prescriptions
            if verbose:
                print('...generating predictions for all prescriptions')
            pred_dfs = []
            for idx in pres_df['PrescriptionIndex'].unique():
                idx_df = pres_df[pres_df['PrescriptionIndex'] == idx]
                idx_df = idx_df.drop(columns='PrescriptionIndex') # predictor doesn't need this
                last_known_date = self.predictor.df['Date'].max()
                # If there is a gap between the predictor's known history and the
                # prescription window, back-fill it with the prior (real) NPIs.
                if last_known_date < pd.to_datetime(idx_df['Date'].min()) - np.timedelta64(1, 'D'):
                    # append prior NPIs to the prescripted ones because the predictor will need them
                    idx_df = idx_df.append(test_df[test_df['Date'] > last_known_date].drop(columns='GeoID'))
                pred_df = self.predictor.predict(start_date, end_date, idx_df)
                pred_df['PrescriptionIndex'] = idx
                pred_dfs.append(pred_df)
            pred_df = pd.concat(pred_dfs)
            # aggregate cases by prescription index and geo
            agg_pred_df = pred_df.groupby(['CountryName',
                                           'RegionName',
                                           'PrescriptionIndex'], dropna=False).mean().reset_index()
            # only use costs of geos we've predicted for
            cost_df = cost_df[cost_df['CountryName'].isin(agg_pred_df['CountryName']) &
                              cost_df['RegionName'].isin(agg_pred_df['RegionName'])]
            # apply weights to prescriptions
            pres_df = weight_prescriptions_by_cost(pres_df, cost_df)
            # aggregate stringency across npis
            pres_df['Stringency'] = pres_df[NPI_COLUMNS].sum(axis=1)
            # aggregate stringency by prescription index and geo
            agg_pres_df = pres_df.groupby(['CountryName',
                                           'RegionName',
                                           'PrescriptionIndex'], dropna=False).mean().reset_index()
            # combine stringency and cases into a single df
            df = agg_pres_df.merge(agg_pred_df, how='outer', on=['CountryName',
                                                                 'RegionName',
                                                                 'PrescriptionIndex'])
            # only keep columns of interest
            df = df[['CountryName',
                     'RegionName',
                     'PrescriptionIndex',
                     'PredictedDailyNewCases',
                     'Stringency']]
            df['TestName'] = test_name
            all_tests_df.append(df)
            # show average (stringency, new_cases) values for each PrescriptionIndex
            if verbose:
                print(df.groupby('PrescriptionIndex').mean().reset_index())
        # save test results in a csv
        if output_file_path is not None:
            all_tests_df = pd.concat(all_tests_df)
            all_tests_df.to_csv(output_file_path)
    @staticmethod
    def set_seed(seed=SEED):
        # Seeds numpy's global RNG (other RNG sources are not seeded here).
        np.random.seed(seed)
if __name__ == '__main__':
    # Run and print different test configurations
    for test_name, test_config in TEST_CONFIGS:
        train_df, test_df, cost_df = gen_test_config(**test_config)
        print(test_name)
        print('test dates')
        # Fixed: `.unique` printed the bound method; call it to show the dates.
        print(test_df.Date.unique())
        print('train dates')
        print(train_df.Date.unique())
        sample_geo = 'Canada'
        # First row of this geo's NPI costs.
        sample_costs = cost_df[cost_df['GeoID'] == sample_geo][NPI_COLUMNS].head(1)
        print('NPI costs for', sample_geo)
        for npi_col in NPI_COLUMNS:
            print(npi_col, round(float(sample_costs[npi_col]), 2))
        # Costs are normalized so that the row sums to len(NPI_COLUMNS).
        print('Sum', round(float(sample_costs.sum(axis=1)), 2))
        print()
| [
"numpy.random.uniform",
"os.path.abspath",
"covid_xprize.standard_predictor.xprize_predictor.XPrizePredictor",
"pandas.DataFrame.from_dict",
"numpy.random.seed",
"pandas.read_csv",
"time.time",
"numpy.timedelta64",
"pandas.to_datetime",
"os.path.join",
"pandas.concat"
] | [((2012, 2053), 'os.path.join', 'os.path.join', (['ROOT_DIR', 'os.pardir', '"""data"""'], {}), "(ROOT_DIR, os.pardir, 'data')\n", (2024, 2053), False, 'import os\n'), ((2072, 2115), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""OxCGRT_latest.csv"""'], {}), "(DATA_DIR, 'OxCGRT_latest.csv')\n", (2084, 2115), False, 'import os\n'), ((2249, 2309), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""Additional_Context_Data_Global.csv"""'], {}), "(DATA_DIR, 'Additional_Context_Data_Global.csv')\n", (2261, 2309), False, 'import os\n'), ((2341, 2392), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""US_states_populations.csv"""'], {}), "(DATA_DIR, 'US_states_populations.csv')\n", (2353, 2392), False, 'import os\n'), ((2417, 2461), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""uk_populations.csv"""'], {}), "(DATA_DIR, 'uk_populations.csv')\n", (2429, 2461), False, 'import os\n'), ((2508, 2555), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""countries_regions.txt"""'], {}), "(DATA_DIR, 'countries_regions.txt')\n", (2520, 2555), False, 'import os\n'), ((1974, 1999), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1989, 1999), False, 'import os\n'), ((4563, 4589), 'pandas.to_datetime', 'pd.to_datetime', (['start_date'], {}), '(start_date)\n', (4577, 4589), True, 'import pandas as pd\n'), ((9857, 9932), 'pandas.read_csv', 'pd.read_csv', (['ADDITIONAL_CONTEXT_FILE'], {'usecols': "['CountryName', 'Population']"}), "(ADDITIONAL_CONTEXT_FILE, usecols=['CountryName', 'Population'])\n", (9868, 9932), True, 'import pandas as pd\n'), ((10105, 10183), 'pandas.read_csv', 'pd.read_csv', (['ADDITIONAL_US_STATES_CONTEXT'], {'usecols': "['NAME', 'POPESTIMATE2019']"}), "(ADDITIONAL_US_STATES_CONTEXT, usecols=['NAME', 'POPESTIMATE2019'])\n", (10116, 10183), True, 'import pandas as pd\n'), ((10674, 10708), 'pandas.read_csv', 'pd.read_csv', (['ADDITIONAL_UK_CONTEXT'], {}), '(ADDITIONAL_UK_CONTEXT)\n', (10685, 10708), True, 'import pandas as pd\n'), 
((4818, 4836), 'pandas.read_csv', 'pd.read_csv', (['costs'], {}), '(costs)\n', (4829, 4836), True, 'import pandas as pd\n'), ((5087, 5190), 'pandas.read_csv', 'pd.read_csv', (['selected_geos'], {'encoding': '"""ISO-8859-1"""', 'dtype': "{'RegionName': str}", 'error_bad_lines': '(False)'}), "(selected_geos, encoding='ISO-8859-1', dtype={'RegionName': str},\n error_bad_lines=False)\n", (5098, 5190), True, 'import pandas as pd\n'), ((5684, 5732), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'GeoID': selected_geos}"], {}), "({'GeoID': selected_geos})\n", (5706, 5732), True, 'import pandas as pd\n'), ((6222, 6254), 'pandas.to_datetime', 'pd.to_datetime', (['train_start_date'], {}), '(train_start_date)\n', (6236, 6254), True, 'import pandas as pd\n'), ((6559, 6589), 'pandas.to_datetime', 'pd.to_datetime', (['train_end_date'], {}), '(train_end_date)\n', (6573, 6589), True, 'import pandas as pd\n'), ((6942, 7084), 'pandas.read_csv', 'pd.read_csv', (['OXFORD_URL'], {'parse_dates': "['Date']", 'encoding': '"""ISO-8859-1"""', 'dtype': "{'RegionName': str, 'RegionCode': str}", 'error_bad_lines': '(False)'}), "(OXFORD_URL, parse_dates=['Date'], encoding='ISO-8859-1', dtype=\n {'RegionName': str, 'RegionCode': str}, error_bad_lines=False)\n", (6953, 7084), True, 'import pandas as pd\n'), ((7292, 7438), 'pandas.read_csv', 'pd.read_csv', (['OXFORD_FILEPATH'], {'parse_dates': "['Date']", 'encoding': '"""ISO-8859-1"""', 'dtype': "{'RegionName': str, 'RegionCode': str}", 'error_bad_lines': '(False)'}), "(OXFORD_FILEPATH, parse_dates=['Date'], encoding='ISO-8859-1',\n dtype={'RegionName': str, 'RegionCode': str}, error_bad_lines=False)\n", (7303, 7438), True, 'import pandas as pd\n'), ((15052, 15100), 'covid_xprize.standard_predictor.xprize_predictor.XPrizePredictor', 'XPrizePredictor', (['PREDICTOR_PATH', 'OXFORD_FILEPATH'], {}), '(PREDICTOR_PATH, OXFORD_FILEPATH)\n', (15067, 15100), False, 'from covid_xprize.standard_predictor.xprize_predictor import 
XPrizePredictor\n'), ((20115, 20135), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (20129, 20135), True, 'import numpy as np\n'), ((12518, 12559), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(nb_ips, nb_geos)'}), '(size=(nb_ips, nb_geos))\n', (12535, 12559), True, 'import numpy as np\n'), ((15644, 15692), 'covid_xprize.standard_predictor.xprize_predictor.XPrizePredictor', 'XPrizePredictor', (['PREDICTOR_PATH', 'OXFORD_FILEPATH'], {}), '(PREDICTOR_PATH, OXFORD_FILEPATH)\n', (15659, 15692), False, 'from covid_xprize.standard_predictor.xprize_predictor import XPrizePredictor\n'), ((16290, 16301), 'time.time', 'time.time', ([], {}), '()\n', (16299, 16301), False, 'import time\n'), ((18036, 18055), 'pandas.concat', 'pd.concat', (['pred_dfs'], {}), '(pred_dfs)\n', (18045, 18055), True, 'import pandas as pd\n'), ((19985, 20008), 'pandas.concat', 'pd.concat', (['all_tests_df'], {}), '(all_tests_df)\n', (19994, 20008), True, 'import pandas as pd\n'), ((17609, 17631), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (17623, 17631), True, 'import numpy as np\n'), ((16649, 16660), 'time.time', 'time.time', ([], {}), '()\n', (16658, 16660), False, 'import time\n')] |
import pickle
import numpy as np
import pandas as pd
import sys
sys.path.insert(1, '../src/MyAIGuide/data')
from fitbitDataGatheredFromWebExport import fitbitDataGatheredFromWebExport
from movesDataGatheredFromWebExport import movesDataGatheredFromWebExport
from googleFitGatheredFromWebExport import googleFitGatheredFromWebExport
from storePainIntensitiesForParticipant1 import storePainIntensitiesForParticipant1
from retrieve_mentalstate_participant1 import retrieve_mentalstate_participant1
from storeSportDataParticipant1 import storeSportDataParticipant1
from storeManicTimeBlankScreen import storeManicTimeBlankScreen
from storeManicTime import storeManicTime
# Creation of the dataframe where everything will be stored.
# One row per day, all tracked quantities initialized to zero.
i = pd.date_range("2015-11-19", periods=1700, freq="1D")
sLength = len(i)
empty = pd.Series(np.zeros(sLength)).values
column_names = [
    "steps",
    "denivelation",
    "kneePain",
    "handsAndFingerPain",
    "foreheadAndEyesPain",
    "forearmElbowPain",
    "aroundEyesPain",
    "shoulderNeckPain",
    "movesSteps",
    "googlefitSteps",
    "generalmood",
    "walk",
    "roadBike",
    "mountainBike",
    "swimming",
    "surfing",
    "climbing",
    "viaFerrata",
    "alpiSki",
    "downSki",
    "climbingDenivelation",
    "climbingMaxEffortIntensity",
    "climbingMeanEffortIntensity",
    "swimmingKm",
    "manicTimeC1",
    "manicTimeC2",
    "manicTimeC3",
    "manicTimeT",
    "manicTimeBlankScreenC1",
    "manicTimeBlankScreenC2",
    "manicTimeBlankScreenC3",
    "manicTimeBlankScreenT",
    "manicTimeDelta",
]
d = {name: empty for name in column_names}
data = pd.DataFrame(data=d, index=i)
# Storing fitbit data in dataframe
fname = "../data/raw/ParticipantData/Participant1PublicOM/dailyFitBitPerMonth/"
data = fitbitDataGatheredFromWebExport(fname, data)
# Storing moves data in dataframe
fname = "../data/raw/ParticipantData/Participant1PublicOM/MovesAppData/yearly/summary/"
data = movesDataGatheredFromWebExport(fname, data)
# Storing google fit data in dataframe (two smartphones, merged by the helper)
filename1 = "../data/raw/ParticipantData/Participant1PublicOM/GoogleFitData/smartphone1/dailyAggregations/dailySummaries.csv"
filename2 = "../data/raw/ParticipantData/Participant1PublicOM/GoogleFitData/smartphone2/dailyAggregations/dailySummaries.csv"
data = googleFitGatheredFromWebExport(filename1, filename2, data)
# Storing pain intensities in dataframe
filename = "../data/raw/ParticipantData/Participant1PublicOM/pain.csv"
data = storePainIntensitiesForParticipant1(filename, data)
# Storing mental state in dataframe
filename = "../data/external/moodAndOtherVariables.csv"
data = retrieve_mentalstate_participant1(filename, data)
# Storing sport data in dataframe
filename = "../data/raw/ParticipantData/Participant1PublicOM/sport.csv"
data = storeSportDataParticipant1(filename, data)
# Storing Manic Time data in dataFrame (computers 1-3)
fname = "../data/raw/ParticipantData/Participant1PublicOM/computerUsage/computer"
numberlist = ["1", "2", "3"]
data = storeManicTime(fname, numberlist, data)
# Storing Manic Time Blank Screen data in dataframe
fname = "../data/raw/ParticipantData/Participant1PublicOM/computerUsage/computer"
numberlist = ["1", "2", "3"]
data = storeManicTimeBlankScreen(fname, numberlist, data)
# Create Manic Time Delta Column in dataframe
# NOTE(review): astype(int) is applied only to the blank-screen column, not to
# manicTimeT — confirm that truncating just one operand is intentional.
data['manicTimeDelta'] = data['manicTimeT'] - data['manicTimeBlankScreenT'].astype(int)
# Prints the dataframe (all rows, no truncation)
pd.set_option('display.max_rows', None)
print(data)
# Saving the dataframe in a txt (actually a pickle, despite the .txt extension)
output = open("../data/preprocessed/preprocessedDataParticipant1.txt", "wb")
pickle.dump(data, output)
output.close()
| [
"pandas.DataFrame",
"fitbitDataGatheredFromWebExport.fitbitDataGatheredFromWebExport",
"pickle.dump",
"pandas.date_range",
"googleFitGatheredFromWebExport.googleFitGatheredFromWebExport",
"storeManicTime.storeManicTime",
"sys.path.insert",
"retrieve_mentalstate_participant1.retrieve_mentalstate_partic... | [((64, 107), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../src/MyAIGuide/data"""'], {}), "(1, '../src/MyAIGuide/data')\n", (79, 107), False, 'import sys\n'), ((734, 786), 'pandas.date_range', 'pd.date_range', (['"""2015-11-19"""'], {'periods': '(1700)', 'freq': '"""1D"""'}), "('2015-11-19', periods=1700, freq='1D')\n", (747, 786), True, 'import pandas as pd\n'), ((1799, 1828), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd', 'index': 'i'}), '(data=d, index=i)\n', (1811, 1828), True, 'import pandas as pd\n'), ((1952, 1996), 'fitbitDataGatheredFromWebExport.fitbitDataGatheredFromWebExport', 'fitbitDataGatheredFromWebExport', (['fname', 'data'], {}), '(fname, data)\n', (1983, 1996), False, 'from fitbitDataGatheredFromWebExport import fitbitDataGatheredFromWebExport\n'), ((2127, 2170), 'movesDataGatheredFromWebExport.movesDataGatheredFromWebExport', 'movesDataGatheredFromWebExport', (['fname', 'data'], {}), '(fname, data)\n', (2157, 2170), False, 'from movesDataGatheredFromWebExport import movesDataGatheredFromWebExport\n'), ((2470, 2528), 'googleFitGatheredFromWebExport.googleFitGatheredFromWebExport', 'googleFitGatheredFromWebExport', (['filename1', 'filename2', 'data'], {}), '(filename1, filename2, data)\n', (2500, 2528), False, 'from googleFitGatheredFromWebExport import googleFitGatheredFromWebExport\n'), ((2648, 2699), 'storePainIntensitiesForParticipant1.storePainIntensitiesForParticipant1', 'storePainIntensitiesForParticipant1', (['filename', 'data'], {}), '(filename, data)\n', (2683, 2699), False, 'from storePainIntensitiesForParticipant1 import storePainIntensitiesForParticipant1\n'), ((2800, 2849), 'retrieve_mentalstate_participant1.retrieve_mentalstate_participant1', 'retrieve_mentalstate_participant1', (['filename', 'data'], {}), '(filename, data)\n', (2833, 2849), False, 'from retrieve_mentalstate_participant1 import retrieve_mentalstate_participant1\n'), ((2964, 3006), 
'storeSportDataParticipant1.storeSportDataParticipant1', 'storeSportDataParticipant1', (['filename', 'data'], {}), '(filename, data)\n', (2990, 3006), False, 'from storeSportDataParticipant1 import storeSportDataParticipant1\n'), ((3165, 3204), 'storeManicTime.storeManicTime', 'storeManicTime', (['fname', 'numberlist', 'data'], {}), '(fname, numberlist, data)\n', (3179, 3204), False, 'from storeManicTime import storeManicTime\n'), ((3376, 3426), 'storeManicTimeBlankScreen.storeManicTimeBlankScreen', 'storeManicTimeBlankScreen', (['fname', 'numberlist', 'data'], {}), '(fname, numberlist, data)\n', (3401, 3426), False, 'from storeManicTimeBlankScreen import storeManicTimeBlankScreen\n'), ((3586, 3625), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (3599, 3625), True, 'import pandas as pd\n'), ((3748, 3773), 'pickle.dump', 'pickle.dump', (['data', 'output'], {}), '(data, output)\n', (3759, 3773), False, 'import pickle\n'), ((822, 839), 'numpy.zeros', 'np.zeros', (['sLength'], {}), '(sLength)\n', (830, 839), True, 'import numpy as np\n')] |
"""
- Takes fixed points data from fixed point dictionary file
- Computes matrix L and eigenvalues for the fixpoint, located, corresponding to a wave with wave numbers (k1,k2).
- Creates a subfolder and save the result
INPUT:
- args: k1, k2, delta0, tol
- fixpoint_dict.pkl -> dictionary of fixed points
"""
import sys
import logging
import os
import pickle
import numpy as np
import scipy.linalg as lin
import carpet
from sim_physics import solve_cycle, nx, ny, N, get_mtwist
# CLI arguments: wave numbers (k1, k2), perturbation amplitude delta0, solver tolerance tol
k1,k2, delta0, tol = int(sys.argv[1]), int(sys.argv[2]), float(sys.argv[3]), float(sys.argv[4])
dirname = os.path.dirname(__file__)  # all file I/O below is relative to this script's directory
def dump_object(obj, filename):
    """Pickle `obj` to `filename`, resolved relative to this script's directory."""
    full_path = os.path.join(dirname, filename)
    print(full_path)
    with open(full_path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_object(filename):
    """Unpickle and return the object stored at `filename` (relative to this script's directory)."""
    full_path = os.path.join(dirname, filename)
    with open(full_path, 'rb') as handle:
        return pickle.load(handle)
# Load fixpoints
# presumably a dict mapping (k1, k2) -> fixed-point phase vector; see get_fixpoint below
filename = 'fixpoint_dict.pkl' #'../fixpoint_dict_nx=6_ny=6_tol=1.000E-08.pkl' #
fixpoint_dict = load_object(filename)
def get_fixpoint(k1, k2):
    """Return a fresh array copy of the stored fixed point for wave numbers (k1, k2)."""
    return np.array(fixpoint_dict[(k1, k2)])
# Fixed point for the wave selected on the command line
fixpoint = get_fixpoint(k1,k2)
## Eigenvalues functions
"""
2019-07-31: choose any set of N-1 perturbations with zero mean phase
2019-08-27: eigenvalues of L-I -> eigenvalues of ln(L) [logarithms of eigenvalues of L]
"""
def calc_sort_log_evals_evecs(L):
    '''
    Compute the spectrum of the linearized map `L`.
    :return: logarithms of the eigenvalues and the matching eigenvectors
             (as columns), both sorted by ascending log-eigenvalue
    '''
    eigenvalues, eigenvectors = lin.eig(L)
    log_evals = np.log(eigenvalues)
    # Reorder both outputs consistently by the (complex) log-eigenvalues.
    order = log_evals.argsort()
    return log_evals[order], eigenvectors[:, order]
def get_L3_general(k1, k2, Delta0s, tol):
    """
    Build the linearized cycle map L from N-1 perturbation vectors with zero
    mean phase. An N-th perturbation along (1,1,...,1) is appended
    automatically: it has already been tested to be neutral, so its image
    after one cycle is taken to be itself.
    """
    fixpoint = get_fixpoint(k1, k2)
    Delta1s = []  # deviations from the fixpoint after one full cycle
    for Delta0 in Delta0s:
        # Perturbed initial condition; the perturbation must have zero mean phase.
        phi0 = fixpoint + Delta0
        assert abs(carpet.get_mean_phase(Delta0)) < tol
        solution = solve_cycle(phi0, tol)
        phi1 = solution.y.T[-1]
        # Subtract the 2*pi advance of the full cycle to get the residual deviation.
        Delta1s.append(phi1 - fixpoint - 2 * np.pi)
    ## Out-of-Poincare-plane perturbation - make it stay neutral
    D0 = np.array(list(Delta0s) + [np.ones([N])])  # perturbations as matrix rows
    D1 = np.array(list(Delta1s) + [np.ones([N])])  # their one-cycle images as rows
    ## Solve D0^T column-vectors -> D1^T column-vectors for L
    return lin.solve(D0, D1).transpose()
def get_L_single(k1, k2, delta0, tol):
    """
    Linearization from N-1 single-node perturbations: each basis vector puts 1
    on one node and -1/(N-1) on all others (so the mean phase stays zero),
    scaled by delta0.
    """
    def _single_node_vector(ix):
        vec = np.full(N, -1 / (N - 1))
        vec[ix] = 1
        return vec * delta0
    basis = [_single_node_vector(ix) for ix in range(N - 1)]
    return get_L3_general(k1, k2, basis, tol)
def get_mtwist_trig_basis(delta0=1, phi0=0):
    '''
    ad hoc solution for 2D
    Go through all possible cosine,sine of mtwists, keep only the ones which are orthogonal to each other
    2019-08-28: phi0 - phase shift all mtwists
    Added this one from 2D: can shift by angle
    Checked: equivalent to the old one when phi0=0
    Returns a list of N-1 perturbation vectors, each scaled by delta0.
    '''
    def zero_vec(vec, eps=10 ** -8):
        # True when the vector is numerically zero (norm scaled by 1/sqrt(N))
        return lin.norm(vec) * N ** (-1 / 2) < eps
    def orthogonal(vec, basis, eps=10 ** -8):
        '''
        Return True if vec is orthogonal (within tolerance eps) to every
        vector already in basis, else False.
        '''
        for b in basis:
            if abs(vec @ b.conj()) > eps * lin.norm(vec) * lin.norm(b):
                return False
        return True
    basis = []
    # Scan all m-twists, collecting cosine/sine vectors that are non-zero and
    # orthogonal to everything collected so far.
    for k1 in range(nx):
        for k2 in range(ny):
            if k1 == 0 and k2 == 0:
                continue
            cos_mtwist = np.cos(get_mtwist(k1, k2) + phi0)
            sin_mtwist = np.sin(get_mtwist(k1, k2) + phi0)
            if not zero_vec(cos_mtwist) and orthogonal(cos_mtwist, basis):
                basis.append(cos_mtwist)
            if not zero_vec(sin_mtwist) and orthogonal(sin_mtwist, basis):
                basis.append(sin_mtwist)
    Delta0s = [delta0 * b for b in basis]
    # Exactly N-1 independent in-plane perturbations are expected.
    assert len(Delta0s) == N - 1
    return Delta0s
def get_L_mtwist(k1, k2, delta0, tol):
    """
    Linearization from N-1 perturbations along cosines and sines of m-twists.
    The N-th direction (orthogonal to the Poincare plane) is handled inside
    get_L3_general, which constructs L so that it stays neutral.
    """
    return get_L3_general(k1, k2, get_mtwist_trig_basis(delta0), tol)
def calc_evals_evecs_mtwist(k1, k2, delta0, tol):
    '''
    Compute the sorted log-eigenvalue spectrum of the m-twist linearization.
    :return: (log-eigenvalues, eigenvectors as columns)
    '''
    return calc_sort_log_evals_evecs(get_L_mtwist(k1, k2, delta0, tol))
def fill_evals_evecs_dict_mtwist(k1, k2, delta0, tol, evals_dict, evecs_dict):
    """Compute the m-twist spectrum for (k1, k2) and store it in the two dicts."""
    key = (k1, k2)
    evals_dict[key], evecs_dict[key] = calc_evals_evecs_mtwist(k1, k2, delta0, tol)
    logging.info("Finished: k1={} k2={}".format(k1, k2))
### Compute the linearization for the requested wave and save it to disk
L_mtwist = get_L_mtwist(k1, k2, delta0, tol)
output_folder = 'out/linear_delta0={:.3E}_tol={:.3E}/'.format(delta0, tol)
os.makedirs(output_folder, exist_ok=True)
filename = output_folder + "/L_mtwist_k1={}_k2={}.npy".format(k1,k2)
np.save(filename, L_mtwist)
# Eigenvalue post-processing left for a separate step:
# L_log_lin = L_mtwist - sp.eye(N)
# evals, evecs = calc_sort_evals_evecs(L_log_lin)
| [
"scipy.linalg.solve",
"pickle.dump",
"numpy.save",
"carpet.get_mean_phase",
"os.makedirs",
"numpy.log",
"sim_physics.get_mtwist",
"os.path.dirname",
"sim_physics.solve_cycle",
"numpy.zeros",
"scipy.linalg.eig",
"numpy.ones",
"pickle.load",
"numpy.array",
"scipy.linalg.norm",
"os.path.j... | [((586, 611), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (601, 611), False, 'import os\n'), ((5553, 5594), 'os.makedirs', 'os.makedirs', (['output_folder'], {'exist_ok': '(True)'}), '(output_folder, exist_ok=True)\n', (5564, 5594), False, 'import os\n'), ((5664, 5691), 'numpy.save', 'np.save', (['filename', 'L_mtwist'], {}), '(filename, L_mtwist)\n', (5671, 5691), True, 'import numpy as np\n'), ((675, 706), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (687, 706), False, 'import os\n'), ((859, 890), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (871, 890), False, 'import os\n'), ((1148, 1179), 'numpy.array', 'np.array', (['fixpoint_dict[k1, k2]'], {}), '(fixpoint_dict[k1, k2])\n', (1156, 1179), True, 'import numpy as np\n'), ((1550, 1560), 'scipy.linalg.eig', 'lin.eig', (['L'], {}), '(L)\n', (1557, 1560), True, 'import scipy.linalg as lin\n'), ((1573, 1586), 'numpy.log', 'np.log', (['evals'], {}), '(evals)\n', (1579, 1586), True, 'import numpy as np\n'), ((771, 815), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (782, 815), False, 'import pickle\n'), ((941, 955), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (952, 955), False, 'import pickle\n'), ((2318, 2340), 'sim_physics.solve_cycle', 'solve_cycle', (['phi0', 'tol'], {}), '(phi0, tol)\n', (2329, 2340), False, 'from sim_physics import solve_cycle, nx, ny, N, get_mtwist\n'), ((2781, 2798), 'scipy.linalg.solve', 'lin.solve', (['D0', 'D1'], {}), '(D0, D1)\n', (2790, 2798), True, 'import scipy.linalg as lin\n'), ((3118, 3129), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3126, 3129), True, 'import numpy as np\n'), ((2245, 2274), 'carpet.get_mean_phase', 'carpet.get_mean_phase', (['Delta0'], {}), '(Delta0)\n', (2266, 2274), False, 'import carpet\n'), ((2595, 2607), 'numpy.ones', 'np.ones', (['[N]'], {}), 
'([N])\n', (2602, 2607), True, 'import numpy as np\n'), ((2703, 2715), 'numpy.ones', 'np.ones', (['[N]'], {}), '([N])\n', (2710, 2715), True, 'import numpy as np\n'), ((3763, 3776), 'scipy.linalg.norm', 'lin.norm', (['vec'], {}), '(vec)\n', (3771, 3776), True, 'import scipy.linalg as lin\n'), ((4037, 4048), 'scipy.linalg.norm', 'lin.norm', (['b'], {}), '(b)\n', (4045, 4048), True, 'import scipy.linalg as lin\n'), ((4263, 4281), 'sim_physics.get_mtwist', 'get_mtwist', (['k1', 'k2'], {}), '(k1, k2)\n', (4273, 4281), False, 'from sim_physics import solve_cycle, nx, ny, N, get_mtwist\n'), ((4322, 4340), 'sim_physics.get_mtwist', 'get_mtwist', (['k1', 'k2'], {}), '(k1, k2)\n', (4332, 4340), False, 'from sim_physics import solve_cycle, nx, ny, N, get_mtwist\n'), ((4021, 4034), 'scipy.linalg.norm', 'lin.norm', (['vec'], {}), '(vec)\n', (4029, 4034), True, 'import scipy.linalg as lin\n')] |
import os
import random
import cv2 as cv
import keras.backend as K
import numpy as np
from config import colors
from data_generator import get_label
from data_generator import get_y
from data_generator import random_choice
from data_generator import safe_crop
from model import build_encoder_decoder
if __name__ == '__main__':
    # Inference/visualization script: runs the trained encoder-decoder on 10
    # random validation images and saves the input, the predicted segmentation
    # overlay, and the ground-truth overlay.
    img_rows, img_cols = 320, 320
    num_labels = 8
    model_weights_path = 'models/model.02-1.3243.hdf5'
    model = build_encoder_decoder()
    model.load_weights(model_weights_path)
    print(model.summary())

    filename = 'valid_names.txt'
    with open(filename, 'r') as f:
        names = f.read().splitlines()
    samples = random.sample(names, 10)

    def colorize(index_map):
        """Map a (img_rows, img_cols) array of label ids to an RGB float canvas."""
        canvas = np.zeros((img_rows, img_cols, 3), np.float32)
        # Fixed: loop bounds were hard-coded to 320 instead of img_rows/img_cols.
        for r in range(img_rows):
            for c in range(img_cols):
                canvas[r, c, :] = colors[index_map[r, c]]
        return canvas

    root_path = ''
    valid_path = 'train_color/'
    for i in range(len(samples)):
        image_name = samples[i]
        filename = os.path.join(valid_path, image_name)
        print('Start processing image: {}'.format(filename))
        x_test = np.empty((1, img_rows, img_cols, 3), dtype=np.float32)
        bgr_img = cv.imread(filename)
        height, width = bgr_img.shape[:2]
        label = get_label(image_name, root_path)
        # Random crop of matched image/label patches.
        x, y = random_choice(label)
        image = safe_crop(bgr_img, x, y)
        label = safe_crop(label, x, y)
        x_test[0, :, :, 0:3] = image / 255.
        # Per-pixel class prediction: argmax over the label dimension.
        out = model.predict(x_test)
        out = np.reshape(out, (img_rows, img_cols, num_labels))
        out = np.argmax(out, axis=2)
        # Blend prediction colors over the input image (60% image, 40% overlay).
        ret = (image * 0.6 + colorize(out) * 0.4).astype(np.uint8)
        # Same blend for the ground-truth labels.
        y = get_y(label)
        label = (image * 0.6 + colorize(y) * 0.4).astype(np.uint8)
        cv.imwrite('images/{}_image.png'.format(i), image)
        cv.imwrite('images/{}_out.png'.format(i), ret)
        cv.imwrite('images/{}_label.png'.format(i), label)
    K.clear_session()
| [
"data_generator.random_choice",
"numpy.argmax",
"random.sample",
"numpy.empty",
"data_generator.get_y",
"numpy.zeros",
"cv2.imread",
"data_generator.safe_crop",
"numpy.reshape",
"data_generator.get_label",
"os.path.join",
"keras.backend.clear_session",
"model.build_encoder_decoder"
] | [((452, 475), 'model.build_encoder_decoder', 'build_encoder_decoder', ([], {}), '()\n', (473, 475), False, 'from model import build_encoder_decoder\n'), ((668, 692), 'random.sample', 'random.sample', (['names', '(10)'], {}), '(names, 10)\n', (681, 692), False, 'import random\n'), ((2391, 2408), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2406, 2408), True, 'import keras.backend as K\n'), ((831, 867), 'os.path.join', 'os.path.join', (['valid_path', 'image_name'], {}), '(valid_path, image_name)\n', (843, 867), False, 'import os\n'), ((948, 1002), 'numpy.empty', 'np.empty', (['(1, img_rows, img_cols, 3)'], {'dtype': 'np.float32'}), '((1, img_rows, img_cols, 3), dtype=np.float32)\n', (956, 1002), True, 'import numpy as np\n'), ((1021, 1040), 'cv2.imread', 'cv.imread', (['filename'], {}), '(filename)\n', (1030, 1040), True, 'import cv2 as cv\n'), ((1099, 1131), 'data_generator.get_label', 'get_label', (['image_name', 'root_path'], {}), '(image_name, root_path)\n', (1108, 1131), False, 'from data_generator import get_label\n'), ((1148, 1168), 'data_generator.random_choice', 'random_choice', (['label'], {}), '(label)\n', (1161, 1168), False, 'from data_generator import random_choice\n'), ((1185, 1209), 'data_generator.safe_crop', 'safe_crop', (['bgr_img', 'x', 'y'], {}), '(bgr_img, x, y)\n', (1194, 1209), False, 'from data_generator import safe_crop\n'), ((1226, 1248), 'data_generator.safe_crop', 'safe_crop', (['label', 'x', 'y'], {}), '(label, x, y)\n', (1235, 1248), False, 'from data_generator import safe_crop\n'), ((1372, 1421), 'numpy.reshape', 'np.reshape', (['out', '(img_rows, img_cols, num_labels)'], {}), '(out, (img_rows, img_cols, num_labels))\n', (1382, 1421), True, 'import numpy as np\n'), ((1436, 1458), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(2)'}), '(out, axis=2)\n', (1445, 1458), True, 'import numpy as np\n'), ((1521, 1566), 'numpy.zeros', 'np.zeros', (['(img_rows, img_cols, 3)', 'np.float32'], {}), '((img_rows, 
img_cols, 3), np.float32)\n', (1529, 1566), True, 'import numpy as np\n'), ((1854, 1866), 'data_generator.get_y', 'get_y', (['label'], {}), '(label)\n', (1859, 1866), False, 'from data_generator import get_y\n'), ((1883, 1928), 'numpy.zeros', 'np.zeros', (['(img_rows, img_cols, 3)', 'np.float32'], {}), '((img_rows, img_cols, 3), np.float32)\n', (1891, 1928), True, 'import numpy as np\n')] |
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout,QFileDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
from libs.utils import addActions, newAction
from functools import partial
import os
import numpy as np
import pandas as pd
class TableWindow(QMainWindow):
    """Window that displays a 2-D array in a table and offers export actions.

    Parameters
    ----------
    parent : QWidget, optional
        Parent widget.
    data : array-like
        2-D table contents; converted to a numpy array.
    save_dir : str
        Path whose directory part seeds the export dialog and whose file
        name part seeds the suggested export file name.
    col_labels, row_labels : list of str, optional
        Header labels for the columns and rows.
    title : str
        Window title; also appended to the suggested export file name.
    """

    def __init__(self, parent=None, data=None, save_dir='', col_labels=None, row_labels=None, title=''):
        super(TableWindow, self).__init__(parent)
        self.data = np.array(data)
        self.col_labels = col_labels
        self.row_labels = row_labels
        self.save_dir, self.filename = os.path.split(save_dir)
        # Strip a ".mjpeg" marker, then the remaining 4-char extension,
        # leaving the bare base name for the export-file suggestion.
        self.filename = self.filename.replace(".mjpeg", ".txt")[:-4]
        self.title = title
        self.left = 0
        self.top = 0
        self.width = 300
        self.height = 200
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.createTable()
        self.setCentralWidget(self.tableWidget)
        export = self.menuBar().addMenu("Export")
        action = partial(newAction, self)
        clipboard = action(('Copy'), self.to_clipboard, 'Ctrl+C', 'copy', 'Copy to Clipboard')
        # Legacy .xls sheets cannot hold 256+ columns, so disable for wide data.
        excel = action(('Excel (xls)'), self.to_excel, '', 'xls', 'Export to excel', enabled=self.data.shape[1] < 256)
        csv = action(('CSV (csv)'), self.to_csv, '', 'csv', 'Export to csv')
        addActions(export, [clipboard, excel, csv])

    def createTable(self):
        """Build self.tableWidget and fill it with the data and header labels."""
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(self.data.shape[0])
        self.tableWidget.setColumnCount(self.data.shape[1])
        self.tableWidget.setHorizontalHeaderLabels(self.col_labels)
        self.tableWidget.setVerticalHeaderLabels(self.row_labels)
        for i, row in enumerate(self.data):
            for j, val in enumerate(row):
                self.tableWidget.setItem(i, j, QTableWidgetItem(str(val)))
        self.tableWidget.move(0, 0)

    def _as_dataframe(self):
        """Return the table contents as a labeled pandas DataFrame."""
        return pd.DataFrame(self.data, index=self.row_labels, columns=self.col_labels)

    def to_excel(self):
        """Export the table to an .xls file chosen by the user."""
        path = self.saveFileDialog('.xls')
        if not path:
            # Bug fix: dialog was cancelled; writing to '' would raise.
            return
        self._as_dataframe().to_excel(path)

    def to_csv(self):
        """Export the table to a .csv file chosen by the user."""
        path = self.saveFileDialog('.csv')
        if not path:
            # Bug fix: dialog was cancelled; writing to '' would raise.
            return
        self._as_dataframe().to_csv(path)

    def to_clipboard(self):
        """Copy the table to the system clipboard."""
        self._as_dataframe().to_clipboard()

    def saveFileDialog(self, ext=".txt"):
        """Open a save dialog and return the chosen path, or '' on cancel."""
        caption = 'Choose File'
        filters = 'File (*%s)' % ext
        openDialogPath = self.save_dir
        dlg = QFileDialog(self, caption, openDialogPath, filters)
        dlg.setDefaultSuffix(ext[1:])
        dlg.setAcceptMode(QFileDialog.AcceptSave)
        dlg.selectFile(self.filename + '_' + self.title)
        dlg.setOption(QFileDialog.DontUseNativeDialog, False)
        if dlg.exec_():
            return dlg.selectedFiles()[0]
        return ''
"pandas.DataFrame",
"functools.partial",
"PyQt5.QtWidgets.QTableWidget",
"numpy.array",
"libs.utils.addActions",
"os.path.split",
"PyQt5.QtWidgets.QFileDialog"
] | [((525, 539), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (533, 539), True, 'import numpy as np\n'), ((653, 676), 'os.path.split', 'os.path.split', (['save_dir'], {}), '(save_dir)\n', (666, 676), False, 'import os\n'), ((1138, 1162), 'functools.partial', 'partial', (['newAction', 'self'], {}), '(newAction, self)\n', (1145, 1162), False, 'from functools import partial\n'), ((1460, 1503), 'libs.utils.addActions', 'addActions', (['export', '[clipboard, excel, csv]'], {}), '(export, [clipboard, excel, csv])\n', (1470, 1503), False, 'from libs.utils import addActions, newAction\n'), ((1590, 1604), 'PyQt5.QtWidgets.QTableWidget', 'QTableWidget', ([], {}), '()\n', (1602, 1604), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget, QTableWidgetItem, QVBoxLayout, QFileDialog\n'), ((2136, 2207), 'pandas.DataFrame', 'pd.DataFrame', (['self.data'], {'index': 'self.row_labels', 'columns': 'self.col_labels'}), '(self.data, index=self.row_labels, columns=self.col_labels)\n', (2148, 2207), True, 'import pandas as pd\n'), ((2317, 2388), 'pandas.DataFrame', 'pd.DataFrame', (['self.data'], {'index': 'self.row_labels', 'columns': 'self.col_labels'}), '(self.data, index=self.row_labels, columns=self.col_labels)\n', (2329, 2388), True, 'import pandas as pd\n'), ((2455, 2526), 'pandas.DataFrame', 'pd.DataFrame', (['self.data'], {'index': 'self.row_labels', 'columns': 'self.col_labels'}), '(self.data, index=self.row_labels, columns=self.col_labels)\n', (2467, 2526), True, 'import pandas as pd\n'), ((2718, 2769), 'PyQt5.QtWidgets.QFileDialog', 'QFileDialog', (['self', 'caption', 'openDialogPath', 'filters'], {}), '(self, caption, openDialogPath, filters)\n', (2729, 2769), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget, QTableWidgetItem, QVBoxLayout, QFileDialog\n')] |
"""Convenience functions for Gaussian filtering and smoothing.
We support the following methods:
- ekf0: Extended Kalman filtering based on a zero-th order Taylor
approximation [1]_, [2]_, [3]_. Also known as "PFOS".
- ekf1: Extended Kalman filtering [3]_.
- ukf: Unscented Kalman filtering [3]_.
- eks0: Extended Kalman smoothing based on a zero-th order Taylor
approximation [4]_.
- eks1: Extended Kalman smoothing [4]_.
- uks: Unscented Kalman smoothing.
References
----------
.. [1] https://arxiv.org/pdf/1610.05261.pdf
.. [2] https://arxiv.org/abs/1807.09737
.. [3] https://arxiv.org/abs/1810.03440
.. [4] https://arxiv.org/pdf/2004.00623.pdf
"""
import numpy as np
import probnum.filtsmooth as pnfs
from probnum.diffeq import steprule
from probnum.diffeq.odefiltsmooth import ivp2filter
from probnum.diffeq.odefiltsmooth.ivpfiltsmooth import GaussianIVPFilter
def probsolve_ivp(
    ivp,
    method="eks0",
    which_prior="ibm1",
    atol=None,
    rtol=None,
    step=None,
    firststep=None,
    **kwargs
):
    """Solve initial value problem with Gaussian filtering and smoothing.

    Numerically computes a Gauss-Markov process which solves numerically
    the initial value problem (IVP) based on a system of first order
    ordinary differential equations (ODEs)

    .. math:: \\dot x(t) = f(t, x(t)), \\quad x(t_0) = x_0,
        \\quad t \\in [t_0, T]

    by regarding it as a (nonlinear) Gaussian filtering (and smoothing)
    problem [3]_. For some configurations it recovers certain multistep
    methods [1]_.
    Convergence rates of filtering [2]_ and smoothing [4]_ are
    comparable to those of methods of Runge-Kutta type.

    This function turns a prior-string into an :class:`ODEPrior`, a
    method-string into a filter/smoother of class :class:`GaussFiltSmooth`, creates a
    :class:`GaussianIVPFilter` object and calls the :meth:`solve()` method. For
    advanced usage we recommend to do this process manually which
    enables advanced methods of tuning the algorithm.

    This function supports the methods:
    extended Kalman filtering based on a zero-th order Taylor
    approximation (EKF0),
    extended Kalman filtering (EKF1),
    unscented Kalman filtering (UKF),
    extended Kalman smoothing based on a zero-th order Taylor
    approximation (EKS0),
    extended Kalman smoothing (EKS1), and
    unscented Kalman smoothing (UKS).

    Arguments
    ---------
    ivp : IVP
        Initial value problem to be solved.
    step : float
        Step size :math:`h` of the solver. This defines the
        discretisation mesh as each proposed step is equal to :math:`h`
        and all proposed steps are accepted.
        Only one out of ``step`` and ``(atol, rtol)`` is set.
    atol : float
        Absolute tolerance of the adaptive step size scheme: a step is
        accepted if the absolute error estimate is below ``atol``.
        Must be set together with ``rtol``, and only one out of ``step``
        and ``(atol, rtol)`` is set.
    rtol : float
        Relative tolerance of the adaptive step size scheme: a step is
        accepted if the relative error estimate is below ``rtol``.
        We implement the scheme proposed by Schober et al., accepting a
        step if the absolute as well as the relative error estimate are
        smaller than the respective tolerance.
        Must be set together with ``atol``, and only one out of ``step``
        and ``(atol, rtol)`` is set.
    which_prior : str, optional
        Which prior is to be used. Default is an IBM(1), further support
        for IBM(:math:`q`), IOUP(:math:`q`), Matern(:math:`q+1/2`),
        :math:`q\\in\\{1, 2, 3, 4\\}` is provided. The available
        options are

        ====================== ========================================
         IBM(:math:`q`)        ``'ibm1'``, ``'ibm2'``, ``'ibm3'``,
                               ``'ibm4'``
         IOUP(:math:`q`)       ``'ioup1'``, ``'ioup2'``, ``'ioup3'``,
                               ``'ioup4'``
         Matern(:math:`q+0.5`) ``'matern32'``, ``'matern52'``,
                               ``'matern72'``, ``'matern92'``
        ====================== ========================================

        The type of prior relates to prior assumptions about the
        derivative of the solution. The IBM(:math:`q`) prior leads to a
        :math:`q`-th order method that is recommended if little to no
        prior information about the solution is available. On the other
        hand, if the :math:`q`-th derivative is expected to regress to
        zero, an IOUP(:math:`q`) prior might be suitable.
    method : str, optional
        Which method is to be used. Default is ``eks0`` which is the
        smoothing variant of the method proposed by Schober et al..
        The available options are

        ================================================ ==============
         Extended Kalman filtering/smoothing (0th order)  ``'ekf0'``,
                                                          ``'eks0'``
         Extended Kalman filtering/smoothing (1st order)  ``'ekf1'``,
                                                          ``'eks1'``
         Unscented Kalman filtering/smoothing             ``'ukf'``,
                                                          ``'uks'``
        ================================================ ==============

        First order extended Kalman filtering and smoothing methods
        require Jacobians of the RHS-vector field of the IVP. The
        uncertainty estimates as returned by EKF1/S1 and UKF/S appear to
        be more reliable than those of EKF0/S0. The latter is more
        stable when it comes to very small steps.
    firststep : float, optional
        First suggested step :math:`h_0` for adaptive step size scheme.
        Default is None which lets the solver start with the suggestion
        :math:`h_0 = T - t_0`. For low accuracy it might be more
        efficient to start out with smaller :math:`h_0` so that the
        first acceptance occurs earlier.
    **kwargs
        Passed on to the prior and filter construction, e.g.
        ``driftspeed`` (IOUP priors), ``lengthscale`` (Matern priors),
        or ``evlvar`` (measurement variance of the filter).

    Returns
    -------
    solution : KalmanODESolution
        Solution of the ODE problem.

        Contains fields:

        t : :obj:`np.ndarray`, shape=(N,)
            Mesh used by the solver to compute the solution.
            It includes the initial time :math:`t_0` but not necessarily the
            final time :math:`T`.
        y : :obj:`list` of :obj:`RandomVariable`, length=N
            Discrete-time solution at times :math:`t_1, ..., t_N`,
            as a list of random variables.
            The means and covariances can be accessed with ``solution.y.mean``
            and ``solution.y.cov``.

    See Also
    --------
    GaussianIVPFilter : Solve IVPs with Gaussian filtering and smoothing
    KalmanODESolution : Solution of ODE problems based on Gaussian filtering and smoothing.

    References
    ----------
    .. [1] <NAME>., <NAME>. and Hennig, P..
        A probabilistic model for the numerical solution of initial
        value problems.
        Statistics and Computing, 2019.
    .. [2] <NAME>., <NAME>., and <NAME>..
        Convergence rates of Gaussian ODE filters.
        2019.
    .. [3] <NAME>., <NAME>., <NAME>., and <NAME>..
        Probabilistic solutions to ordinary differential equations as
        non-linear Bayesian filtering: a new perspective.
        Statistics and Computing, 2019.
    .. [4] <NAME>., <NAME>., and <NAME>..
        Bayesian ODE solvers: the maximum a posteriori estimate.
        2019.

    Examples
    --------
    >>> from probnum.diffeq import logistic, probsolve_ivp
    >>> from probnum import random_variables as rvs
    >>> import numpy as np
    >>> initrv = rvs.Constant(0.15)
    >>> ivp = logistic(timespan=[0., 1.5], initrv=initrv, params=(4, 1))
    >>> solution = probsolve_ivp(ivp, method="ekf0", step=0.1)
    >>> print(np.round(solution.y.mean, 2))
    [[0.15]
     [0.21]
     [0.28]
     [0.36]
     [0.46]
     [0.56]
     [0.65]
     [0.74]
     [0.81]
     [0.86]
     [0.9 ]
     [0.93]
     [0.95]
     [0.97]
     [0.98]
     [0.98]]

    >>> initrv = rvs.Constant(0.15)
    >>> ivp = logistic(timespan=[0., 1.5], initrv=initrv, params=(4, 1))
    >>> solution = probsolve_ivp(ivp, method="eks1", which_prior="ioup3", step=0.1)
    >>> print(np.round(solution.y.mean, 2))
    [[0.15]
     [0.21]
     [0.28]
     [0.37]
     [0.47]
     [0.57]
     [0.66]
     [0.74]
     [0.81]
     [0.87]
     [0.91]
     [0.93]
     [0.96]
     [0.97]
     [0.98]
     [0.99]]
    """
    stprl = _create_steprule(atol, rtol, step, firststep, ivp)
    prior = _string2prior(ivp, which_prior, **kwargs)
    # _create_filter validates ``method`` before we inspect its name below.
    gfilt = _create_filter(ivp, prior, method, **kwargs)
    # Smoothing methods are exactly the ones with an "s" marker in the name.
    with_smoothing = method in ("eks0", "eks1", "uks")
    solver = GaussianIVPFilter(ivp, gfilt, with_smoothing=with_smoothing)
    solution = solver.solve(steprule=stprl)
    return solution
def _create_filter(ivp, prior, method, **kwargs):
"""Create the solver object that is used."""
if method not in ["ekf0", "ekf1", "ukf", "eks0", "eks1", "uks"]:
raise ValueError("Method not supported.")
gfilt = _string2filter(ivp, prior, method, **kwargs)
return gfilt
def _create_steprule(atol, rtol, step, firststep, ivp):
    """Build a constant or adaptive step rule from the given settings."""
    _check_step_tol(step, atol, rtol)
    if step is not None:
        return steprule.ConstantSteps(step)
    if firststep is None:
        # Lazy take on the first-step heuristic of Hairer, Wanner,
        # Norsett, p. 169.
        initial_norm = np.linalg.norm(ivp.initrv.mean)
        derivative_norm = np.linalg.norm(ivp(ivp.t0, ivp.initrv.mean))
        firststep = 0.01 * initial_norm / derivative_norm
    return steprule.AdaptiveSteps(firststep=firststep, atol=atol, rtol=rtol)
def _check_step_tol(step, atol, rtol):
both_none = atol is None and rtol is None and step is None
both_not_none = (atol is not None and rtol is not None) and step is not None
if both_none or both_not_none:
errormsg = "Please specify either a tolerance or a step size."
raise ValueError(errormsg)
atol_not_rtol = atol is not None and rtol is None
rtol_not_atol = rtol is not None and atol is None
if atol_not_rtol or rtol_not_atol:
errormsg = "Please specify either both atol and rtol, or neither."
raise ValueError(errormsg)
def _string2prior(ivp, which_prior, **kwargs):
ibm_family = ["ibm1", "ibm2", "ibm3", "ibm4"]
ioup_family = ["ioup1", "ioup2", "ioup3", "ioup4"]
matern_family = ["matern32", "matern52", "matern72", "matern92"]
if which_prior in ibm_family:
return _string2ibm(ivp, which_prior, **kwargs)
elif which_prior in ioup_family:
return _string2ioup(ivp, which_prior, **kwargs)
elif which_prior in matern_family:
return _string2matern(ivp, which_prior, **kwargs)
else:
raise RuntimeError("It should have been impossible to reach this point.")
def _string2ibm(ivp, which_prior, **kwargs):
if which_prior == "ibm1":
return pnfs.statespace.IBM(
1,
ivp.dimension,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "ibm2":
return pnfs.statespace.IBM(
2,
ivp.dimension,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "ibm3":
return pnfs.statespace.IBM(
3,
ivp.dimension,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "ibm4":
return pnfs.statespace.IBM(
4,
ivp.dimension,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
else:
raise RuntimeError("It should have been impossible to reach this point.")
def _string2ioup(ivp, which_prior, **kwargs):
if "driftspeed" in kwargs.keys():
driftspeed = kwargs["driftspeed"]
else:
driftspeed = 1.0
if which_prior == "ioup1":
return pnfs.statespace.IOUP(
1,
ivp.dimension,
driftspeed,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "ioup2":
return pnfs.statespace.IOUP(
2,
ivp.dimension,
driftspeed,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "ioup3":
return pnfs.statespace.IOUP(
3,
ivp.dimension,
driftspeed,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "ioup4":
return pnfs.statespace.IOUP(
4,
ivp.dimension,
driftspeed,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
else:
raise RuntimeError("It should have been impossible to reach this point.")
def _string2matern(ivp, which_prior, **kwargs):
if "lengthscale" in kwargs.keys():
lengthscale = kwargs["lengthscale"]
else:
lengthscale = 1.0
if which_prior == "matern32":
return pnfs.statespace.Matern(
1,
ivp.dimension,
lengthscale,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "matern52":
return pnfs.statespace.Matern(
2,
ivp.dimension,
lengthscale,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "matern72":
return pnfs.statespace.Matern(
3,
ivp.dimension,
lengthscale,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
elif which_prior == "matern92":
return pnfs.statespace.Matern(
4,
ivp.dimension,
lengthscale,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
else:
raise RuntimeError("It should have been impossible to reach this point.")
def _string2filter(_ivp, _prior, _method, **kwargs):
if "evlvar" in kwargs.keys():
evlvar = kwargs["evlvar"]
else:
evlvar = 0.0
if _method in ("ekf0", "eks0"):
return ivp2filter.ivp2ekf0(_ivp, _prior, evlvar)
elif _method in ("ekf1", "eks1"):
return ivp2filter.ivp2ekf1(_ivp, _prior, evlvar)
elif _method in ("ukf", "uks"):
return ivp2filter.ivp2ukf(_ivp, _prior, evlvar)
else:
raise ValueError("Type of filter not supported.")
| [
"probnum.diffeq.odefiltsmooth.ivp2filter.ivp2ekf1",
"probnum.diffeq.odefiltsmooth.ivp2filter.ivp2ukf",
"probnum.filtsmooth.statespace.IOUP",
"probnum.diffeq.steprule.AdaptiveSteps",
"probnum.filtsmooth.statespace.Matern",
"probnum.diffeq.odefiltsmooth.ivp2filter.ivp2ekf0",
"probnum.diffeq.steprule.Const... | [((8447, 8507), 'probnum.diffeq.odefiltsmooth.ivpfiltsmooth.GaussianIVPFilter', 'GaussianIVPFilter', (['ivp', 'gfilt'], {'with_smoothing': 'with_smoothing'}), '(ivp, gfilt, with_smoothing=with_smoothing)\n', (8464, 8507), False, 'from probnum.diffeq.odefiltsmooth.ivpfiltsmooth import GaussianIVPFilter\n'), ((9004, 9032), 'probnum.diffeq.steprule.ConstantSteps', 'steprule.ConstantSteps', (['step'], {}), '(step)\n', (9026, 9032), False, 'from probnum.diffeq import steprule\n'), ((9323, 9388), 'probnum.diffeq.steprule.AdaptiveSteps', 'steprule.AdaptiveSteps', ([], {'firststep': 'firststep', 'atol': 'atol', 'rtol': 'rtol'}), '(firststep=firststep, atol=atol, rtol=rtol)\n', (9345, 9388), False, 'from probnum.diffeq import steprule\n'), ((10677, 10781), 'probnum.filtsmooth.statespace.IBM', 'pnfs.statespace.IBM', (['(1)', 'ivp.dimension'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(1, ivp.dimension, forward_implementation='sqrt',\n backward_implementation='sqrt')\n", (10696, 10781), True, 'import probnum.filtsmooth as pnfs\n'), ((11760, 11878), 'probnum.filtsmooth.statespace.IOUP', 'pnfs.statespace.IOUP', (['(1)', 'ivp.dimension', 'driftspeed'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(1, ivp.dimension, driftspeed, forward_implementation=\n 'sqrt', backward_implementation='sqrt')\n", (11780, 11878), True, 'import probnum.filtsmooth as pnfs\n'), ((12955, 13075), 'probnum.filtsmooth.statespace.Matern', 'pnfs.statespace.Matern', (['(1)', 'ivp.dimension', 'lengthscale'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(1, ivp.dimension, lengthscale,\n forward_implementation='sqrt', backward_implementation='sqrt')\n", (12977, 13075), True, 'import probnum.filtsmooth as pnfs\n'), ((14158, 14199), 'probnum.diffeq.odefiltsmooth.ivp2filter.ivp2ekf0', 'ivp2filter.ivp2ekf0', (['_ivp', '_prior', 'evlvar'], {}), '(_ivp, 
_prior, evlvar)\n', (14177, 14199), False, 'from probnum.diffeq.odefiltsmooth import ivp2filter\n'), ((9157, 9188), 'numpy.linalg.norm', 'np.linalg.norm', (['ivp.initrv.mean'], {}), '(ivp.initrv.mean)\n', (9171, 9188), True, 'import numpy as np\n'), ((10884, 10988), 'probnum.filtsmooth.statespace.IBM', 'pnfs.statespace.IBM', (['(2)', 'ivp.dimension'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(2, ivp.dimension, forward_implementation='sqrt',\n backward_implementation='sqrt')\n", (10903, 10988), True, 'import probnum.filtsmooth as pnfs\n'), ((11993, 12111), 'probnum.filtsmooth.statespace.IOUP', 'pnfs.statespace.IOUP', (['(2)', 'ivp.dimension', 'driftspeed'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(2, ivp.dimension, driftspeed, forward_implementation=\n 'sqrt', backward_implementation='sqrt')\n", (12013, 12111), True, 'import probnum.filtsmooth as pnfs\n'), ((13194, 13314), 'probnum.filtsmooth.statespace.Matern', 'pnfs.statespace.Matern', (['(2)', 'ivp.dimension', 'lengthscale'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(2, ivp.dimension, lengthscale,\n forward_implementation='sqrt', backward_implementation='sqrt')\n", (13216, 13314), True, 'import probnum.filtsmooth as pnfs\n'), ((14253, 14294), 'probnum.diffeq.odefiltsmooth.ivp2filter.ivp2ekf1', 'ivp2filter.ivp2ekf1', (['_ivp', '_prior', 'evlvar'], {}), '(_ivp, _prior, evlvar)\n', (14272, 14294), False, 'from probnum.diffeq.odefiltsmooth import ivp2filter\n'), ((11091, 11195), 'probnum.filtsmooth.statespace.IBM', 'pnfs.statespace.IBM', (['(3)', 'ivp.dimension'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(3, ivp.dimension, forward_implementation='sqrt',\n backward_implementation='sqrt')\n", (11110, 11195), True, 'import probnum.filtsmooth as pnfs\n'), ((12226, 12344), 'probnum.filtsmooth.statespace.IOUP', 'pnfs.statespace.IOUP', (['(3)', 
'ivp.dimension', 'driftspeed'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(3, ivp.dimension, driftspeed, forward_implementation=\n 'sqrt', backward_implementation='sqrt')\n", (12246, 12344), True, 'import probnum.filtsmooth as pnfs\n'), ((13433, 13553), 'probnum.filtsmooth.statespace.Matern', 'pnfs.statespace.Matern', (['(3)', 'ivp.dimension', 'lengthscale'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(3, ivp.dimension, lengthscale,\n forward_implementation='sqrt', backward_implementation='sqrt')\n", (13455, 13553), True, 'import probnum.filtsmooth as pnfs\n'), ((14346, 14386), 'probnum.diffeq.odefiltsmooth.ivp2filter.ivp2ukf', 'ivp2filter.ivp2ukf', (['_ivp', '_prior', 'evlvar'], {}), '(_ivp, _prior, evlvar)\n', (14364, 14386), False, 'from probnum.diffeq.odefiltsmooth import ivp2filter\n'), ((11298, 11402), 'probnum.filtsmooth.statespace.IBM', 'pnfs.statespace.IBM', (['(4)', 'ivp.dimension'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(4, ivp.dimension, forward_implementation='sqrt',\n backward_implementation='sqrt')\n", (11317, 11402), True, 'import probnum.filtsmooth as pnfs\n'), ((12459, 12577), 'probnum.filtsmooth.statespace.IOUP', 'pnfs.statespace.IOUP', (['(4)', 'ivp.dimension', 'driftspeed'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(4, ivp.dimension, driftspeed, forward_implementation=\n 'sqrt', backward_implementation='sqrt')\n", (12479, 12577), True, 'import probnum.filtsmooth as pnfs\n'), ((13672, 13792), 'probnum.filtsmooth.statespace.Matern', 'pnfs.statespace.Matern', (['(4)', 'ivp.dimension', 'lengthscale'], {'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(4, ivp.dimension, lengthscale,\n forward_implementation='sqrt', backward_implementation='sqrt')\n", (13694, 13792), True, 'import probnum.filtsmooth as pnfs\n')] |
"""
NCL_polyg_18.py
===============
This script illustrates the following concepts:
- Adding lines, markers, and polygons to a map
- Drawing lines, markers, polygons, and text in inset axes
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/polyg_18.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/polyg_18_2_lg.png
"""
###############################################################################
# Import packages:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import cartopy
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
import numpy as np
from geocat.viz import util as gvutil
###############################################################################
# Define helper function to remove ticks/frames from axes
def removeTicks(axis):
    """Hide both coordinate axes of *axis* so no ticks or labels are drawn."""
    for single_axis in (axis.get_xaxis(), axis.get_yaxis()):
        single_axis.set_visible(False)
###############################################################################
# Plot map, markers, and polygons
# Set size of figure
fig = plt.figure(figsize=(10, 10))
# Make grid on figure with 2 rows, 20 columns (top 19 cols hold the map,
# bottom row holds the legend)
grid = plt.GridSpec(2, 20, figure=fig)
# Make subplot for map
ax = plt.subplot(grid[:-1, 1:], projection=ccrs.PlateCarree())
# Add continents
continents = cartopy.feature.NaturalEarthFeature(name='land',
                                            category='physical',
                                            scale='50m',
                                            edgecolor='None',
                                            facecolor='lightgray')
ax.add_feature(continents)
# Set map extent
ax.set_global()
# Create arrays with location of each marker
lon = np.arange(-160, 160, 20)
lat = np.arange(-80, 80, 10)
# Create array with marker symbols
# Matplotlib provides a different set of markers than NCL, so plot appearance differs
marker = [
    '.', '+', '*', 'o', 'x', 's', '^', 'v', 'D', '>', '<', 'p', 'h', '8', 'X',
    'd'
]
# Draw markers on diagonal line across graph
for x in range(len(lon)):
    ax.plot(lon[x],
            lat[x],
            marker=marker[x],
            color='blue',
            fillstyle='none',
            markersize=18,
            zorder=3)
# Draw small red box in upper center
ax.add_patch(
    mpatches.Rectangle(xy=[7, 47],
                       width=9,
                       height=7,
                       facecolor='None',
                       edgecolor='red',
                       alpha=1.0,
                       transform=ccrs.PlateCarree(),
                       zorder=5))
# Draw green window in bottom right
ax.add_patch(
    mpatches.Rectangle(xy=[110, -45],
                       width=50,
                       height=35,
                       facecolor='lime',
                       alpha=0.3,
                       transform=ccrs.PlateCarree(),
                       zorder=5))
# Use gvutil function to set the ticks on axes
gvutil.set_axes_limits_and_ticks(ax,
                                 xlim=None,
                                 ylim=None,
                                 xticks=np.arange(-180, 210, 30),
                                 yticks=np.arange(-90, 120, 30),
                                 xticklabels=None,
                                 yticklabels=None)
# Use gvutil function to give ticks W/N/E/S labels
gvutil.add_lat_lon_ticklabels(ax,
                              zero_direction_label=True,
                              dateline_direction_label=True)
# Remove degree symbols from the latitude/longitude tick labels
ax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))
ax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))
# Use gvutil function to set title of plot
# Set title font to bold using the r"$\bf{_____}$" formatting characters
# Spaces in title will not show up if included in curly brackets
gvutil.set_titles_and_labels(ax,
                             maintitle=r"$\bf{Big}$" + " " +
                             r"$\bf{centered}$" + " " + r"$\bf{title}$",
                             maintitlefontsize=25)
# Use gvutil function to plot three minor ticks for every major tick on axes
gvutil.add_major_minor_ticks(ax,
                             x_minor_per_major=3,
                             y_minor_per_major=3,
                             labelsize="small")
# Make second subplot for legend
ax2 = plt.subplot(grid[-1, 1:], frameon=False)
removeTicks(ax2)
# Create 6 inset axes within subplot for each field in legend
# Inset_axes positional array argument takes four values:
# [starting (bottom left) x coordinate of window, starting y coordinate of window, width of field, height of field]
# Add circle
axin1 = ax2.inset_axes([0.1, 0.8, .1, .1], frameon=False)
removeTicks(axin1)
axin1.add_patch(mpatches.Circle((0.1, 0.1), radius=.1, color='blue'))
axin1.axis('equal')
# Add label for circle
axin2 = ax2.inset_axes([0.0, 0.65, .20, .5], frameon=False)
removeTicks(axin2)
axin2.text(0,
           .7,
           'Marker (left justified text)',
           color='blue',
           fontsize=12,
           verticalalignment='center')
# Add red line
axin3 = ax2.inset_axes([0.30, 0.6, .33, .5], frameon=False)
removeTicks(axin3)
axin3.plot([0, 4], [3, 3], color='red')
# NOTE(review): 'axin1' below looks like a copy-paste slip -- it probably
# should be axin3 (the inset just drawn). Confirm against the NCL original
# before changing.
axin1.axis('scaled')
# Add label for red line
axin4 = ax2.inset_axes([0.33, 0.65, .33, .5], frameon=False)
removeTicks(axin4)
axin4.text(0,
           .7,
           'Polyline (centered text)',
           color='red',
           fontsize=12,
           verticalalignment='center')
# Add green polygon
axin5 = ax2.inset_axes([0.62, 0.6, .33, .5], frameon=False)
removeTicks(axin5)
axin5.add_patch(
    mpatches.Rectangle(xy=[.3, .3],
                       width=.6,
                       height=.3,
                       facecolor='lime',
                       alpha=0.3))
# NOTE(review): same apparent 'axin1' slip as above -- probably meant axin5.
axin1.axis('scaled')
# Add label for green polygon
axin6 = ax2.inset_axes([0.66, 0.65, .33, .5], frameon=False)
removeTicks(axin6)
axin6.text(0,
           .7,
           'Polygon (right justified text)',
           color='lime',
           fontsize=12,
           verticalalignment='center')
plt.show()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"geocat.viz.util.set_titles_and_labels",
"matplotlib.patches.Rectangle",
"cartopy.mpl.ticker.LongitudeFormatter",
"cartopy.feature.NaturalEarthFeature",
"cartopy.crs.PlateCarree",
"matplotlib.patches.Circle",
"matplotlib.pyplot.figure",
"geoca... | [((1163, 1191), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1173, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1275), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(2)', '(20)'], {'figure': 'fig'}), '(2, 20, figure=fig)\n', (1256, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1522), 'cartopy.feature.NaturalEarthFeature', 'cartopy.feature.NaturalEarthFeature', ([], {'name': '"""land"""', 'category': '"""physical"""', 'scale': '"""50m"""', 'edgecolor': '"""None"""', 'facecolor': '"""lightgray"""'}), "(name='land', category='physical', scale\n ='50m', edgecolor='None', facecolor='lightgray')\n", (1429, 1522), False, 'import cartopy\n'), ((1828, 1852), 'numpy.arange', 'np.arange', (['(-160)', '(160)', '(20)'], {}), '(-160, 160, 20)\n', (1837, 1852), True, 'import numpy as np\n'), ((1859, 1881), 'numpy.arange', 'np.arange', (['(-80)', '(80)', '(10)'], {}), '(-80, 80, 10)\n', (1868, 1881), True, 'import numpy as np\n'), ((3481, 3576), 'geocat.viz.util.add_lat_lon_ticklabels', 'gvutil.add_lat_lon_ticklabels', (['ax'], {'zero_direction_label': '(True)', 'dateline_direction_label': '(True)'}), '(ax, zero_direction_label=True,\n dateline_direction_label=True)\n', (3510, 3576), True, 'from geocat.viz import util as gvutil\n'), ((3997, 4131), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax'], {'maintitle': "('$\\\\bf{Big}$' + ' ' + '$\\\\bf{centered}$' + ' ' + '$\\\\bf{title}$')", 'maintitlefontsize': '(25)'}), "(ax, maintitle='$\\\\bf{Big}$' + ' ' +\n '$\\\\bf{centered}$' + ' ' + '$\\\\bf{title}$', maintitlefontsize=25)\n", (4025, 4131), True, 'from geocat.viz import util as gvutil\n'), ((4293, 4390), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax'], {'x_minor_per_major': '(3)', 'y_minor_per_major': '(3)', 'labelsize': '"""small"""'}), "(ax, x_minor_per_major=3, y_minor_per_major=3,\n labelsize='small')\n", (4321, 
4390), True, 'from geocat.viz import util as gvutil\n'), ((4514, 4554), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[-1, 1:]'], {'frameon': '(False)'}), '(grid[-1, 1:], frameon=False)\n', (4525, 4554), True, 'import matplotlib.pyplot as plt\n'), ((6261, 6271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6269, 6271), True, 'import matplotlib.pyplot as plt\n'), ((3711, 3746), 'cartopy.mpl.ticker.LatitudeFormatter', 'LatitudeFormatter', ([], {'degree_symbol': '""""""'}), "(degree_symbol='')\n", (3728, 3746), False, 'from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter\n'), ((3777, 3813), 'cartopy.mpl.ticker.LongitudeFormatter', 'LongitudeFormatter', ([], {'degree_symbol': '""""""'}), "(degree_symbol='')\n", (3795, 3813), False, 'from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter\n'), ((4916, 4969), 'matplotlib.patches.Circle', 'mpatches.Circle', (['(0.1, 0.1)'], {'radius': '(0.1)', 'color': '"""blue"""'}), "((0.1, 0.1), radius=0.1, color='blue')\n", (4931, 4969), True, 'import matplotlib.patches as mpatches\n'), ((5791, 5880), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', ([], {'xy': '[0.3, 0.3]', 'width': '(0.6)', 'height': '(0.3)', 'facecolor': '"""lime"""', 'alpha': '(0.3)'}), "(xy=[0.3, 0.3], width=0.6, height=0.3, facecolor='lime',\n alpha=0.3)\n", (5809, 5880), True, 'import matplotlib.patches as mpatches\n'), ((1343, 1361), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1359, 1361), True, 'import cartopy.crs as ccrs\n'), ((3236, 3260), 'numpy.arange', 'np.arange', (['(-180)', '(210)', '(30)'], {}), '(-180, 210, 30)\n', (3245, 3260), True, 'import numpy as np\n'), ((3302, 3325), 'numpy.arange', 'np.arange', (['(-90)', '(120)', '(30)'], {}), '(-90, 120, 30)\n', (3311, 3325), True, 'import numpy as np\n'), ((2651, 2669), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2667, 2669), True, 'import cartopy.crs as ccrs\n'), ((2969, 2987), 'cartopy.crs.PlateCarree', 
'ccrs.PlateCarree', ([], {}), '()\n', (2985, 2987), True, 'import cartopy.crs as ccrs\n')] |
import numpy as np
class StanleyControl:
    """Stanley steering controller (front-axle path tracking).

    Combines the heading error toward the nearest path point with the
    arctangent of the cross-track error at the front wheel to produce a
    steering command. Angles in the state and path are in degrees.
    """

    def __init__(self, kp=0.5):
        """kp scales the cross-track correction term."""
        self.path = None
        self.kp = kp

    def set_path(self, path):
        """Store a copy of the reference path (rows of [x, y, yaw, ...])."""
        self.path = path.copy()

    def _search_nearest(self, pos):
        """Return (index, squared distance) of the path point closest to pos.

        Vectorized with numpy instead of a Python loop over every path
        row; returns (-1, 99999999) for an empty path, matching the old
        sentinel behavior.
        """
        dx = self.path[:, 0] - pos[0]
        dy = self.path[:, 1] - pos[1]
        sq_dist = dx * dx + dy * dy
        if sq_dist.size == 0:
            return -1, 99999999
        min_id = int(np.argmin(sq_dist))
        return min_id, sq_dist[min_id]

    # State: [x, y, yaw, delta, v, l]
    def feedback(self, state):
        """Compute the next steering angle from the model state.

        Args:
            state: dict with keys "x", "y", "yaw", "delta", "v", "l"
                (yaw and delta in degrees, l is the wheelbase).

        Returns:
            (next_delta, target): steering command in degrees and the
            nearest path point, or (None, None) when no path is set.
        """
        if self.path is None:
            print("No path !!")
            return None, None
        x, y, yaw, delta, v, l = state["x"], state["y"], state["yaw"], state["delta"], state["v"], state["l"]
        # Front-wheel position: Stanley control acts on the front axle.
        xf = x + l * np.cos(np.deg2rad(yaw))
        yf = y + l * np.sin(np.deg2rad(yaw))
        # Speed of the front wheel along its own heading.
        vf = v / np.cos(np.deg2rad(delta))
        # Nearest path point to the front wheel.
        min_idx, _ = self._search_nearest((xf, yf))
        target = self.path[min_idx]
        # Heading error wrapped to (-180, 180].
        theta_p = target[2]
        theta_e = (theta_p - yaw) % 360
        if theta_e > 180:
            theta_e -= 360
        # Signed cross-track error: project the front-wheel offset onto
        # the path normal (path heading rotated by +90 degrees).
        e = [xf - target[0], yf - target[1]]
        p = [np.cos(np.deg2rad(theta_p + 90)), np.sin(np.deg2rad(theta_p + 90))]
        error = np.dot(e, p)
        # Stanley law: heading term plus arctan of cross-track term.
        next_delta = np.rad2deg(np.arctan2(-self.kp * error, vf)) + theta_e
        return next_delta, target
if __name__ == "__main__":
    import cv2
    import path_generator
    import sys
    sys.path.append("../")
    from bicycle_model import KinematicModel

    # Reference path plus a static background image with the path pre-drawn.
    path = path_generator.path2()
    img_path = np.ones((600,600,3))
    for i in range(path.shape[0]-1):
        seg_start = (int(path[i,0]), int(path[i,1]))
        seg_end = (int(path[i+1,0]), int(path[i+1,1]))
        cv2.line(img_path, seg_start, seg_end, (1.0,0.5,0.5), 1)

    # Vehicle model and controller setup.
    car = KinematicModel()
    start = (50,300,0)
    car.init_state(start)
    controller = StanleyControl(kp=0.5)
    controller.set_path(path)

    while True:
        print("\rState: "+car.state_str(), end="\t")

        # Longitudinal speed: proportional control, braking near the path end.
        end_dist = np.hypot(path[-1,0]-car.x, path[-1,1]-car.y)
        target_v = 40 if end_dist > 265 else 0
        next_a = 0.1*(target_v - car.v)

        # Lateral steering via the Stanley controller.
        state = {"x":car.x, "y":car.y, "yaw":car.yaw, "delta":car.delta, "v":car.v, "l":car.l}
        next_delta, target = controller.feedback(state)
        car.control(next_a, next_delta)

        # Step the simulation and render the current frame.
        car.update()
        img = img_path.copy()
        cv2.circle(img,(int(target[0]),int(target[1])),3,(1,0.3,0.7),2) # target points
        img = car.render(img)
        img = cv2.flip(img, 0)
        cv2.imshow("Stanley Control Test", img)

        key = cv2.waitKey(1)
        if key == ord('r'):
            car.init_state(start)
        if key == 27:
            print()
            break
| [
"sys.path.append",
"path_generator.path2",
"numpy.arctan2",
"numpy.deg2rad",
"cv2.waitKey",
"bicycle_model.KinematicModel",
"numpy.ones",
"numpy.hypot",
"cv2.flip",
"numpy.dot",
"cv2.imshow"
] | [((2604, 2626), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (2619, 2626), False, 'import sys\n'), ((2695, 2717), 'path_generator.path2', 'path_generator.path2', ([], {}), '()\n', (2715, 2717), False, 'import path_generator\n'), ((2733, 2755), 'numpy.ones', 'np.ones', (['(600, 600, 3)'], {}), '((600, 600, 3))\n', (2740, 2755), True, 'import numpy as np\n'), ((2944, 2960), 'bicycle_model.KinematicModel', 'KinematicModel', ([], {}), '()\n', (2958, 2960), False, 'from bicycle_model import KinematicModel\n'), ((2370, 2382), 'numpy.dot', 'np.dot', (['e', 'p'], {}), '(e, p)\n', (2376, 2382), True, 'import numpy as np\n'), ((3203, 3253), 'numpy.hypot', 'np.hypot', (['(path[-1, 0] - car.x)', '(path[-1, 1] - car.y)'], {}), '(path[-1, 0] - car.x, path[-1, 1] - car.y)\n', (3211, 3253), True, 'import numpy as np\n'), ((3785, 3801), 'cv2.flip', 'cv2.flip', (['img', '(0)'], {}), '(img, 0)\n', (3793, 3801), False, 'import cv2\n'), ((3810, 3849), 'cv2.imshow', 'cv2.imshow', (['"""Stanley Control Test"""', 'img'], {}), "('Stanley Control Test', img)\n", (3820, 3849), False, 'import cv2\n'), ((3862, 3876), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3873, 3876), False, 'import cv2\n'), ((1854, 1871), 'numpy.deg2rad', 'np.deg2rad', (['delta'], {}), '(delta)\n', (1864, 1871), True, 'import numpy as np\n'), ((2292, 2316), 'numpy.deg2rad', 'np.deg2rad', (['(theta_p + 90)'], {}), '(theta_p + 90)\n', (2302, 2316), True, 'import numpy as np\n'), ((2327, 2351), 'numpy.deg2rad', 'np.deg2rad', (['(theta_p + 90)'], {}), '(theta_p + 90)\n', (2337, 2351), True, 'import numpy as np\n'), ((2439, 2471), 'numpy.arctan2', 'np.arctan2', (['(-self.kp * error)', 'vf'], {}), '(-self.kp * error, vf)\n', (2449, 2471), True, 'import numpy as np\n'), ((1734, 1749), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1744, 1749), True, 'import numpy as np\n'), ((1777, 1792), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1787, 1792), True, 'import 
numpy as np\n')] |
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import os
print(os.getcwd())
# Preprocessed train/test splits; the first column is dropped just below
# (presumably a saved index column -- TODO confirm against the prep step).
train = pd.read_csv('../data/prep/train3.csv')
test = pd.read_csv('../data/prep/test3.csv')
submission = pd.read_csv('../data/raw/housing/sample_submission.csv')
train = train.iloc[:, 1:]
test = test.iloc[:, 1:]
X = train.drop('target', axis = 1)
#y = np.log1p(train.target)
y = train.target
# NOTE: despite the name, `target` is the test-set feature matrix
# (same columns as X), not a label vector.
target = test[X.columns]
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from catboost import CatBoostRegressor, Pool
from ngboost import NGBRegressor
from sklearn.metrics import make_scorer
from sklearn.model_selection import KFold
def NMAE(true, pred) -> float:
    """Normalized mean absolute error: MAE(true, pred) scaled by mean(|true|)."""
    abs_err = np.abs(true - pred)
    return np.mean(abs_err) / np.mean(np.abs(true))
# Negated scorer for sklearn search APIs.
# NOTE(review): nmae_score is never used below -- candidate for removal.
nmae_score = make_scorer(NMAE, greater_is_better=False)
kf = KFold(n_splits = 10, random_state = 42, shuffle = True)
# ngboost
# Accumulates the 10-fold-averaged test prediction and per-fold scores.
ngb_pred = np.zeros(target.shape[0])
ngb_val = []
for n, (tr_idx, val_idx) in enumerate(kf.split(X, y)) :
    print(f'{n + 1} FOLD Training.....')
    tr_x, tr_y = X.iloc[tr_idx], y.iloc[tr_idx]
    # NOTE(review): validation labels are expm1-transformed while the training
    # labels are not (log1p on y is commented out above); the expm1 here and on
    # val_pred looks like a leftover from a log-target experiment -- confirm.
    val_x, val_y = X.iloc[val_idx], np.expm1(y.iloc[val_idx])
    ngb = NGBRegressor(random_state = 42, n_estimators = 1000, verbose = 0, learning_rate = 0.03)
    ngb.fit(tr_x, tr_y, val_x, val_y, early_stopping_rounds = 300)
    val_pred = np.expm1(ngb.predict(val_x))
    val_nmae = NMAE(val_y, val_pred)
    ngb_val.append(val_nmae)
    print(f'{n + 1} FOLD NMAE = {val_nmae}\n')
    # NOTE(review): target_data (a catboost Pool) is built but never used.
    target_data = Pool(data = target, label = None)
    # Each fold contributes 1/10 of its test prediction to the ensemble mean.
    fold_pred = ngb.predict(target) / 10
    ngb_pred += fold_pred
print(f'10FOLD Mean of NMAE = {np.mean(ngb_val)} & std = {np.std(ngb_val)}')
submission['target'] = ngb_pred
submission.to_csv("../out/ngb/ngb1.csv", header = True, index = False)
| [
"numpy.abs",
"warnings.filterwarnings",
"pandas.read_csv",
"os.getcwd",
"numpy.std",
"numpy.zeros",
"sklearn.model_selection.KFold",
"ngboost.NGBRegressor",
"numpy.expm1",
"sklearn.metrics.make_scorer",
"numpy.mean",
"catboost.Pool"
] | [((59, 92), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (82, 92), False, 'import warnings\n'), ((132, 170), 'pandas.read_csv', 'pd.read_csv', (['"""../data/prep/train3.csv"""'], {}), "('../data/prep/train3.csv')\n", (143, 170), True, 'import pandas as pd\n'), ((178, 215), 'pandas.read_csv', 'pd.read_csv', (['"""../data/prep/test3.csv"""'], {}), "('../data/prep/test3.csv')\n", (189, 215), True, 'import pandas as pd\n'), ((229, 285), 'pandas.read_csv', 'pd.read_csv', (['"""../data/raw/housing/sample_submission.csv"""'], {}), "('../data/raw/housing/sample_submission.csv')\n", (240, 285), True, 'import pandas as pd\n'), ((828, 870), 'sklearn.metrics.make_scorer', 'make_scorer', (['NMAE'], {'greater_is_better': '(False)'}), '(NMAE, greater_is_better=False)\n', (839, 870), False, 'from sklearn.metrics import make_scorer\n'), ((877, 926), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'random_state': '(42)', 'shuffle': '(True)'}), '(n_splits=10, random_state=42, shuffle=True)\n', (882, 926), False, 'from sklearn.model_selection import KFold\n'), ((955, 980), 'numpy.zeros', 'np.zeros', (['target.shape[0]'], {}), '(target.shape[0])\n', (963, 980), True, 'import numpy as np\n'), ((110, 121), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (119, 121), False, 'import os\n'), ((1216, 1295), 'ngboost.NGBRegressor', 'NGBRegressor', ([], {'random_state': '(42)', 'n_estimators': '(1000)', 'verbose': '(0)', 'learning_rate': '(0.03)'}), '(random_state=42, n_estimators=1000, verbose=0, learning_rate=0.03)\n', (1228, 1295), False, 'from ngboost import NGBRegressor\n'), ((1556, 1585), 'catboost.Pool', 'Pool', ([], {'data': 'target', 'label': 'None'}), '(data=target, label=None)\n', (1560, 1585), False, 'from catboost import CatBoostRegressor, Pool\n'), ((732, 751), 'numpy.abs', 'np.abs', (['(true - pred)'], {}), '(true - pred)\n', (738, 751), True, 'import numpy as np\n'), ((1175, 1200), 'numpy.expm1', 'np.expm1', 
(['y.iloc[val_idx]'], {}), '(y.iloc[val_idx])\n', (1183, 1200), True, 'import numpy as np\n'), ((779, 791), 'numpy.abs', 'np.abs', (['true'], {}), '(true)\n', (785, 791), True, 'import numpy as np\n'), ((1688, 1704), 'numpy.mean', 'np.mean', (['ngb_val'], {}), '(ngb_val)\n', (1695, 1704), True, 'import numpy as np\n'), ((1715, 1730), 'numpy.std', 'np.std', (['ngb_val'], {}), '(ngb_val)\n', (1721, 1730), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from copulas.bivariate.independence import Independence
class TestIndependence(TestCase):
def test___init__(self):
"""Independence copula can be instantiated directly."""
# Setup / Run
instance = Independence()
# Check
assert isinstance(instance, Independence)
assert instance.theta is None
assert instance.tau is None
def test_fit(self):
"""Fit checks that the given values are independent."""
# Setup
instance = Independence()
data = np.array([
[1, 2],
[4, 3]
])
# Run
instance.fit(data)
# Check
instance.tau is None
instance.theta is None
def test_cumulative_distribution(self):
"""cumulative_distribution is the product of both probabilities."""
# Setup
instance = Independence()
data = np.array([
[0.0, 0.0],
[0.1, 0.1],
[0.2, 0.2],
[0.5, 0.5],
[0.9, 0.9],
[1.0, 1.0]
])
expected_result = np.array([
0.00,
0.01,
0.04,
0.25,
0.81,
1.00,
])
# Run
result = instance.cumulative_distribution(data)
# Check
(result == expected_result).all().all()
| [
"numpy.array",
"copulas.bivariate.independence.Independence"
] | [((278, 292), 'copulas.bivariate.independence.Independence', 'Independence', ([], {}), '()\n', (290, 292), False, 'from copulas.bivariate.independence import Independence\n'), ((558, 572), 'copulas.bivariate.independence.Independence', 'Independence', ([], {}), '()\n', (570, 572), False, 'from copulas.bivariate.independence import Independence\n'), ((588, 614), 'numpy.array', 'np.array', (['[[1, 2], [4, 3]]'], {}), '([[1, 2], [4, 3]])\n', (596, 614), True, 'import numpy as np\n'), ((924, 938), 'copulas.bivariate.independence.Independence', 'Independence', ([], {}), '()\n', (936, 938), False, 'from copulas.bivariate.independence import Independence\n'), ((954, 1040), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.1, 0.1], [0.2, 0.2], [0.5, 0.5], [0.9, 0.9], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [0.1, 0.1], [0.2, 0.2], [0.5, 0.5], [0.9, 0.9], [1.0,\n 1.0]])\n', (962, 1040), True, 'import numpy as np\n'), ((1146, 1190), 'numpy.array', 'np.array', (['[0.0, 0.01, 0.04, 0.25, 0.81, 1.0]'], {}), '([0.0, 0.01, 0.04, 0.25, 0.81, 1.0])\n', (1154, 1190), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import json
import torch
import pprint
import argparse
import importlib
import numpy as np
import cv2
import matplotlib
matplotlib.use("Agg")
from config import system_configs
from nnet.py_factory import NetworkFactory
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms, soft_nms_merge
# Do not let cuDNN benchmark/auto-tune convolution algorithms.
torch.backends.cudnn.benchmark = False

# COCO detection categories; index 0 is the background slot, so the 1-based
# category ids used below index directly into this list.
class_name = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
    'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
    'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
    'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
    'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
    'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
    'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
    'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
    'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
    'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
    'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

# File extensions accepted when scanning a demo directory for images.
image_ext = ['jpg', 'jpeg', 'png']
def parse_args():
    """Parse command-line options for the CornerNet demo.

    Returns an argparse.Namespace with:
        demo     -- path to an image or a directory of images (default "")
        cfg_file -- config name under the configs directory (default "CornerNet")
        testiter -- training iteration whose weights to load (default None)
        suffix   -- optional config-file suffix (default None)
    """
    parser = argparse.ArgumentParser(description="Demo CornerNet")
    parser.add_argument("--demo", dest="demo", default="", type=str,
                        help="demo image or image folder")
    parser.add_argument("--cfg_file", default='CornerNet', type=str,
                        help="config file")
    parser.add_argument("--testiter", dest="testiter", default=None,
                        help="test at iteration i")
    parser.add_argument("--suffix", dest="suffix", default=None, type=str)
    return parser.parse_args()
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
def kp_decode(nnet, images, K, ae_threshold=0.5, kernel=3, debug=False):
    """Run corner-keypoint decoding on ``nnet`` and return detections.

    Wraps ``images`` in a single-element batch list, forwards the decoding
    parameters, and converts the resulting tensor to a host numpy array.
    """
    raw = nnet.test(
        [images], ae_threshold=ae_threshold, K=K, kernel=kernel, debug=debug)
    return raw.data.cpu().numpy()
if __name__ == "__main__":
    args = parse_args()

    # Resolve the JSON config path; an optional --suffix selects a variant
    # config named "<cfg_file>-<suffix>.json".
    if args.suffix is None:
        cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
    else:
        cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + "-{}.json".format(args.suffix))
    print("cfg_file: {}".format(cfg_file))

    with open(cfg_file, "r") as f:
        configs = json.load(f)

    configs["system"]["snapshot_name"] = args.cfg_file
    system_configs.update_config(configs["system"])
    print("system config...")
    pprint.pprint(system_configs.full)

    # Load weights from the requested training iteration (default: the
    # final iteration from the config).
    test_iter = system_configs.max_iter if args.testiter is None \
        else args.testiter
    print("loading parameters at iteration: {}".format(test_iter))
    print("building neural network...")
    nnet = NetworkFactory(None)
    print("loading parameters...")
    nnet.load_params(test_iter)
    nnet.cuda()
    nnet.eval_mode()

    # Inference settings from the "db" section of the config.
    K = configs["db"]["top_k"]
    ae_threshold = configs["db"]["ae_threshold"]
    nms_kernel = 3
    scales = configs["db"]["test_scales"]
    weight_exp = 8
    merge_bbox = False
    categories = configs["db"]["categories"]
    nms_threshold = configs["db"]["nms_threshold"]
    max_per_image = configs["db"]["max_per_image"]
    nms_algorithm = {
        "nms": 0,
        "linear_soft_nms": 1,
        "exp_soft_nms": 2
    }["exp_soft_nms"]

    # Per-channel normalization constants (values in [0, 1]).
    mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
    std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
    top_bboxes = {}

    # --demo may be a single image path or a directory of images.
    if os.path.isdir(args.demo):
        image_names = []
        ls = os.listdir(args.demo)
        for file_name in ls:
            ext = file_name[file_name.rfind('.') + 1:].lower()
            if ext in image_ext:
                image_names.append(os.path.join(args.demo, file_name))
    else:
        image_names = [args.demo]

    for image_id, image_name in enumerate(image_names):
        image = cv2.imread(image_name)
        height, width = image.shape[0:2]
        detections = []
        # Multi-scale inference: run the network once per test scale.
        for scale in scales:
            new_height = int(height * scale)
            new_width = int(width * scale)
            new_center = np.array([new_height // 2, new_width // 2])
            # OR-ing with 127 pads each dimension up to the next value of the
            # form 128*k - 1.
            inp_height = new_height | 127
            inp_width = new_width | 127
            images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
            ratios = np.zeros((1, 2), dtype=np.float32)
            borders = np.zeros((1, 4), dtype=np.float32)
            sizes = np.zeros((1, 2), dtype=np.float32)
            # Network output resolution is the input divided by 4.
            out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
            height_ratio = out_height / inp_height
            width_ratio = out_width / inp_width
            resized_image = cv2.resize(image, (new_width, new_height))
            resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])
            resized_image = resized_image / 255.
            normalize_(resized_image, mean, std)
            images[0] = resized_image.transpose((2, 0, 1))
            borders[0] = border
            sizes[0] = [int(height * scale), int(width * scale)]
            ratios[0] = [height_ratio, width_ratio]
            # Horizontal-flip test-time augmentation:
            # batch = [image, mirrored image].
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
            images = torch.from_numpy(images)
            dets = kp_decode(nnet, images, K, ae_threshold=ae_threshold, kernel=nms_kernel, debug=True)
            dets = dets.reshape(2, -1, 8)
            # Mirror the flipped half's x coordinates back to the original frame.
            dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
            dets = dets.reshape(1, -1, 8)
            # Map from network-output coordinates back to image coordinates.
            _rescale_dets(dets, ratios, borders, sizes)
            dets[:, :, 0:4] /= scale
            detections.append(dets)
        detections = np.concatenate(detections, axis=1)
        classes = detections[..., -1]
        classes = classes[0]
        detections = detections[0]
        # reject detections with negative scores
        keep_inds = (detections[:, 4] > -1)
        detections = detections[keep_inds]
        classes = classes[keep_inds]
        # Per-category soft-NMS; category ids are 1-based in top_bboxes.
        top_bboxes[image_id] = {}
        for j in range(categories):
            keep_inds = (classes == j)
            top_bboxes[image_id][j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)
            if merge_bbox:
                soft_nms_merge(top_bboxes[image_id][j + 1], Nt=nms_threshold, method=nms_algorithm, weight_exp=weight_exp)
            else:
                soft_nms(top_bboxes[image_id][j + 1], Nt=nms_threshold, method=nms_algorithm)
            top_bboxes[image_id][j + 1] = top_bboxes[image_id][j + 1][:, 0:5]
        # Keep only the max_per_image highest-scoring boxes across all classes.
        scores = np.hstack([
            top_bboxes[image_id][j][:, -1]
            for j in range(1, categories + 1)
        ])
        if len(scores) > max_per_image:
            kth = len(scores) - max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, -1] >= thresh)
                top_bboxes[image_id][j] = top_bboxes[image_id][j][keep_inds]
        # Visualization: draw boxes scoring above 0.5 with a label banner.
        # NOTE(review): `if 1:` is a debug toggle, and `bboxes` is never used.
        if 1:
            image = cv2.imread(image_name)
            bboxes = {}
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, -1] > 0.5)
                cat_name = class_name[j]
                cat_size = cv2.getTextSize(
                    cat_name + '0.0', cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)[0]
                # Random pastel color per category (channels drawn from
                # [0.4, 1.0), scaled to 0-255 ints).
                color = np.random.random((3, )) * 0.6 + 0.4
                color = color * 255
                color = color.astype(np.int32).tolist()
                for bbox in top_bboxes[image_id][j][keep_inds]:
                    sc = bbox[4]
                    bbox = bbox[0:4].astype(np.int32)
                    txt = '{}{:.1f}'.format(cat_name, sc)
                    # Place the label below the box top edge if it would clip
                    # the top image border, otherwise just above the box.
                    if bbox[1] - cat_size[1] - 2 < 0:
                        cv2.rectangle(image,
                            (bbox[0], bbox[1] + 2),
                            (bbox[0] + cat_size[0], bbox[1] + cat_size[1] + 2),
                            color, -1
                        )
                        cv2.putText(image, txt,
                            (bbox[0], bbox[1] + cat_size[1] + 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1
                        )
                    else:
                        cv2.rectangle(image,
                            (bbox[0], bbox[1] - cat_size[1] - 2),
                            (bbox[0] + cat_size[0], bbox[1] - 2),
                            color, -1
                        )
                        cv2.putText(image, txt,
                            (bbox[0], bbox[1] - 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1
                        )
                    cv2.rectangle(image,
                        (bbox[0], bbox[1]),
                        (bbox[2], bbox[3]),
                        color, 2
                    )
            cv2.imshow('out', image)
            cv2.waitKey()
| [
"argparse.ArgumentParser",
"nnet.py_factory.NetworkFactory",
"numpy.clip",
"pprint.pprint",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"config.system_configs.update_config",
"utils.normalize_",
"cv2.resize",
"numpy.partition",
"utils.crop_image",
"cv2.waitKey",
"external.nms.soft_nms_m... | [((153, 174), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (167, 174), False, 'import matplotlib\n'), ((1435, 1488), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Demo CornerNet"""'}), "(description='Demo CornerNet')\n", (1458, 1488), False, 'import argparse\n'), ((2309, 2359), 'numpy.clip', 'np.clip', (['xs', '(0)', 'sizes[:, 1][:, None, None]'], {'out': 'xs'}), '(xs, 0, sizes[:, 1][:, None, None], out=xs)\n', (2316, 2359), True, 'import numpy as np\n'), ((2364, 2414), 'numpy.clip', 'np.clip', (['ys', '(0)', 'sizes[:, 0][:, None, None]'], {'out': 'ys'}), '(ys, 0, sizes[:, 0][:, None, None], out=ys)\n', (2371, 2414), True, 'import numpy as np\n'), ((3127, 3174), 'config.system_configs.update_config', 'system_configs.update_config', (["configs['system']"], {}), "(configs['system'])\n", (3155, 3174), False, 'from config import system_configs\n'), ((3209, 3243), 'pprint.pprint', 'pprint.pprint', (['system_configs.full'], {}), '(system_configs.full)\n', (3222, 3243), False, 'import pprint\n'), ((3493, 3513), 'nnet.py_factory.NetworkFactory', 'NetworkFactory', (['None'], {}), '(None)\n', (3507, 3513), False, 'from nnet.py_factory import NetworkFactory\n'), ((4117, 4181), 'numpy.array', 'np.array', (['[0.40789654, 0.44719302, 0.47026115]'], {'dtype': 'np.float32'}), '([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)\n', (4125, 4181), True, 'import numpy as np\n'), ((4193, 4257), 'numpy.array', 'np.array', (['[0.28863828, 0.27408164, 0.27809835]'], {'dtype': 'np.float32'}), '([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)\n', (4201, 4257), True, 'import numpy as np\n'), ((4286, 4310), 'os.path.isdir', 'os.path.isdir', (['args.demo'], {}), '(args.demo)\n', (4299, 4310), False, 'import os\n'), ((2763, 2827), 'os.path.join', 'os.path.join', (['system_configs.config_dir', "(args.cfg_file + '.json')"], {}), "(system_configs.config_dir, args.cfg_file + '.json')\n", (2775, 
2827), False, 'import os\n'), ((3042, 3054), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3051, 3054), False, 'import json\n'), ((4350, 4371), 'os.listdir', 'os.listdir', (['args.demo'], {}), '(args.demo)\n', (4360, 4371), False, 'import os\n'), ((4690, 4712), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (4700, 4712), False, 'import cv2\n'), ((6512, 6546), 'numpy.concatenate', 'np.concatenate', (['detections'], {'axis': '(1)'}), '(detections, axis=1)\n', (6526, 6546), True, 'import numpy as np\n'), ((4924, 4967), 'numpy.array', 'np.array', (['[new_height // 2, new_width // 2]'], {}), '([new_height // 2, new_width // 2])\n', (4932, 4967), True, 'import numpy as np\n'), ((5076, 5133), 'numpy.zeros', 'np.zeros', (['(1, 3, inp_height, inp_width)'], {'dtype': 'np.float32'}), '((1, 3, inp_height, inp_width), dtype=np.float32)\n', (5084, 5133), True, 'import numpy as np\n'), ((5156, 5190), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'np.float32'}), '((1, 2), dtype=np.float32)\n', (5164, 5190), True, 'import numpy as np\n'), ((5213, 5247), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {'dtype': 'np.float32'}), '((1, 4), dtype=np.float32)\n', (5221, 5247), True, 'import numpy as np\n'), ((5270, 5304), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'np.float32'}), '((1, 2), dtype=np.float32)\n', (5278, 5304), True, 'import numpy as np\n'), ((5516, 5558), 'cv2.resize', 'cv2.resize', (['image', '(new_width, new_height)'], {}), '(image, (new_width, new_height))\n', (5526, 5558), False, 'import cv2\n'), ((5603, 5665), 'utils.crop_image', 'crop_image', (['resized_image', 'new_center', '[inp_height, inp_width]'], {}), '(resized_image, new_center, [inp_height, inp_width])\n', (5613, 5665), False, 'from utils import crop_image, normalize_\n'), ((5728, 5764), 'utils.normalize_', 'normalize_', (['resized_image', 'mean', 'std'], {}), '(resized_image, mean, std)\n', (5738, 5764), False, 'from utils import crop_image, normalize_\n'), ((6000, 6055), 
'numpy.concatenate', 'np.concatenate', (['(images, images[:, :, :, ::-1])'], {'axis': '(0)'}), '((images, images[:, :, :, ::-1]), axis=0)\n', (6014, 6055), True, 'import numpy as np\n'), ((6077, 6101), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (6093, 6101), False, 'import torch\n'), ((7882, 7904), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (7892, 7904), False, 'import cv2\n'), ((9776, 9800), 'cv2.imshow', 'cv2.imshow', (['"""out"""', 'image'], {}), "('out', image)\n", (9786, 9800), False, 'import cv2\n'), ((9813, 9826), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (9824, 9826), False, 'import cv2\n'), ((7078, 7189), 'external.nms.soft_nms_merge', 'soft_nms_merge', (['top_bboxes[image_id][j + 1]'], {'Nt': 'nms_threshold', 'method': 'nms_algorithm', 'weight_exp': 'weight_exp'}), '(top_bboxes[image_id][j + 1], Nt=nms_threshold, method=\n nms_algorithm, weight_exp=weight_exp)\n', (7092, 7189), False, 'from external.nms import soft_nms, soft_nms_merge\n'), ((7219, 7296), 'external.nms.soft_nms', 'soft_nms', (['top_bboxes[image_id][j + 1]'], {'Nt': 'nms_threshold', 'method': 'nms_algorithm'}), '(top_bboxes[image_id][j + 1], Nt=nms_threshold, method=nms_algorithm)\n', (7227, 7296), False, 'from external.nms import soft_nms, soft_nms_merge\n'), ((7616, 7641), 'numpy.partition', 'np.partition', (['scores', 'kth'], {}), '(scores, kth)\n', (7628, 7641), True, 'import numpy as np\n'), ((4532, 4566), 'os.path.join', 'os.path.join', (['args.demo', 'file_name'], {}), '(args.demo, file_name)\n', (4544, 4566), False, 'import os\n'), ((8113, 8180), 'cv2.getTextSize', 'cv2.getTextSize', (["(cat_name + '0.0')", 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(2)'], {}), "(cat_name + '0.0', cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)\n", (8128, 8180), False, 'import cv2\n'), ((9600, 9670), 'cv2.rectangle', 'cv2.rectangle', (['image', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', 'color', '(2)'], {}), '(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), 
color, 2)\n', (9613, 9670), False, 'import cv2\n'), ((8233, 8255), 'numpy.random.random', 'np.random.random', (['(3,)'], {}), '((3,))\n', (8249, 8255), True, 'import numpy as np\n'), ((8662, 8774), 'cv2.rectangle', 'cv2.rectangle', (['image', '(bbox[0], bbox[1] + 2)', '(bbox[0] + cat_size[0], bbox[1] + cat_size[1] + 2)', 'color', '(-1)'], {}), '(image, (bbox[0], bbox[1] + 2), (bbox[0] + cat_size[0], bbox[1\n ] + cat_size[1] + 2), color, -1)\n', (8675, 8774), False, 'import cv2\n'), ((8903, 9024), 'cv2.putText', 'cv2.putText', (['image', 'txt', '(bbox[0], bbox[1] + cat_size[1] + 2)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 0)'], {'thickness': '(1)'}), '(image, txt, (bbox[0], bbox[1] + cat_size[1] + 2), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=1)\n', (8914, 9024), False, 'import cv2\n'), ((9153, 9264), 'cv2.rectangle', 'cv2.rectangle', (['image', '(bbox[0], bbox[1] - cat_size[1] - 2)', '(bbox[0] + cat_size[0], bbox[1] - 2)', 'color', '(-1)'], {}), '(image, (bbox[0], bbox[1] - cat_size[1] - 2), (bbox[0] +\n cat_size[0], bbox[1] - 2), color, -1)\n', (9166, 9264), False, 'import cv2\n'), ((9394, 9501), 'cv2.putText', 'cv2.putText', (['image', 'txt', '(bbox[0], bbox[1] - 2)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 0)'], {'thickness': '(1)'}), '(image, txt, (bbox[0], bbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 0, 0), thickness=1)\n', (9405, 9501), False, 'import cv2\n')] |
'''
Table.py
A module/class for creating LaTeX deluxetables. In a nutshell, you create
a table instance, add columns, set options, then call the print method.
written by <NAME>, http://users.obs.carnegiescience.edu/~cburns/site/?p=22
'''
from __future__ import print_function
from __future__ import absolute_import
import numpy
from . import sigfig
import os,string,re,sys
import types
# Floating-point scalar types that print_data treats as numeric values.
# Older numpy builds may lack float16 (or even float64 as a distinct
# attribute), so fall back to progressively smaller lists. The original
# used bare `except:`, which also swallows unrelated errors; a missing
# numpy attribute raises AttributeError specifically.
try:
    float_types = [float, numpy.float16, numpy.float32, numpy.float64]
except AttributeError:  # numpy build without float16
    try:
        float_types = [float, numpy.float32, numpy.float64]
    except AttributeError:  # very old numpy without float64
        float_types = [float, numpy.float32]
class Table:
    """Builder for a LaTeX ``deluxetable``.

    Create an instance, add header rows and data sections, then call
    :meth:`print_table` to write the LaTeX source to stdout, a filename,
    or an open stream.
    """

    def __init__(self, numcols, justs=None, fontsize=None, rotate=False,
        tablewidth=None, tablenum=None, caption=None, label=None, notes=None):
        """Set up an empty table.

        Args:
            numcols: number of columns in the table.
            justs: per-column justification characters ('c', 'r', 'l' or 'p');
                defaults to all-centered. 'p' columns become ``p{0.2cm}``.
            fontsize: optional argument for ``\\tabletypesize``.
            rotate: emit ``\\rotate`` to typeset the table in landscape.
            tablewidth: value for ``\\tablewidth`` (defaults to ``0pc``).
            tablenum: explicit table number for ``\\tablenum``.
            caption: caption text; ``label`` is appended as ``\\label``.
            label: LaTeX cross-reference key.
            notes: text for ``\\tablecomments``.

        Raises:
            ValueError: if ``justs`` has the wrong length or bad characters.
        """
        self.numcols = numcols
        self.justs = justs
        if self.justs is None:
            self.justs = ['c' for i in range(numcols)]
        else:
            self.justs = list(justs)
            if len(self.justs) != numcols:
                raise ValueError("Error, justs must have %d elements" % (numcols))
            for iter, just in enumerate(self.justs):
                if just[0] not in ['c','r','l','p']:
                    raise ValueError("Error, invalid character for just: %s" % just)
                if just[0] == 'p':
                    self.justs[iter] = 'p{0.2cm}'
        self.fontsize = fontsize
        self.rotate = rotate
        self.tablewidth = tablewidth
        # Fix: the constructor used to assign None unconditionally, silently
        # discarding the tablenum argument so \tablenum was never emitted.
        self.tablenum = tablenum
        self.caption = caption
        self.label = label
        self.notes = notes
        self.col_justs = []
        self.headers = []
        self.header_ids = []
        # self.data is a list of data. Each element of the list corresponds
        # to a separate "section" of the table, headed by self.data_labels
        # Each element of data should be a list of self.numcols items.
        self.data = []
        self.data_labels = []
        self.data_label_types = []
        self.sigfigs = []
        self.nrows = []

    def add_header_row(self, headers, cols=None):
        '''Add a header row to the table. [headers] should be a list of the
        strings that will be in the header. [cols], if specified, should be a
        list of column indexes. If [cols] is None, it is assummed the headers
        are in order and there are no multicolumns. If cols is specified, you
        can indicate the the ith header spans several columns by setting the
        ith value of cols to a 2-tuple of first and last columns for the span.'''
        if cols is None:
            if len(headers) != self.numcols:
                raise ValueError("Error, headers must be a list of length %d" %\
                    self.numcols)
            self.headers.append(headers)
            self.header_ids.append(range(self.numcols))
        else:
            # Collect every column index covered by [cols], expanding spans.
            ids = []
            for item in cols:
                if type(item) is int:
                    ids.append(item)
                elif type(item) is tuple:
                    ids += range(item[0],item[1]+1)
            # Fix: `ids.sort` (missing parens) never sorted, and comparing a
            # list against a range object is always unequal on Python 3, so any
            # call with cols= used to raise unconditionally.
            ids.sort()
            if ids != list(range(self.numcols)):
                raise ValueError("Error, missing columns in cols")
            self.headers.append(headers)
            self.header_ids.append(cols)
        return

    def add_data(self, data, label="", sigfigs=2, labeltype='cutin'):
        '''Add a matrix of data. [data] should be a list with length equal to
        the number of columns of the table. Each item of [data] should be a
        list or numpy array. A list of strings will be inserted as is. If
        a column is a 1-D array of float type, the number of significant
        figures will be set to [sigfigs]. If a column is 2D with shape
        (N,2), it is treated as a value with uncertainty and the uncertainty
        will be rounded to [sigfigs] and value will be rounded accordingly,
        and both will be printed with parenthetical errors. If a label is
        given, it will be printed in the table with \\cutinhead if labeltype
        is 'cutin' or \\sidehead if labeltype is 'side'.'''
        if type(data) is not list:
            raise ValueError("data should be a list")
        if len(data) != self.numcols:
            raise ValueError("Error, length of data must match number of table columns")

        for datum in data:
            if type(datum) not in [list, numpy.ndarray]:
                raise ValueError("data must be list of lists and numpy arrays")
            if len(numpy.shape(datum)) not in [1,2]:
                raise ValueError("data items must be 1D or 2D")

        # Every column must have the same number of rows.
        nrows = numpy.shape(data[0])[0]
        for datum in data[1:]:
            if numpy.shape(datum)[0] != nrows:
                raise ValueError("each data item must have same first dimension")
        self.nrows.append(nrows)

        # sigfigs may be a scalar (applied to every column) or per-column.
        if len(numpy.shape(sigfigs)) == 0:
            self.sigfigs.append([sigfigs for i in range(self.numcols)])
        else:
            if len(numpy.shape(sigfigs)) != 1:
                raise ValueError("sigfigs must be scalar or have same length as number of columns")
            self.sigfigs.append(sigfigs)
        self.data_labels.append(label)
        self.data_label_types.append(labeltype)
        self.data.append(data)

    def print_table(self, fp=None):
        """Write the complete table to ``fp``.

        ``fp`` may be None (stdout), a filename string (opened and closed
        here), or any open file-like object.
        """
        if fp is None:
            fp = sys.stdout
            # Fix: we_open was left undefined on this branch, producing a
            # NameError at the close check below.
            we_open = False
        elif type(fp) is type(""):
            fp = open(fp, 'w')
            we_open = True
        else:
            we_open = False

        self.print_preamble(fp)
        self.print_header(fp)
        self.print_data(fp)
        self.print_footer(fp)
        if we_open:
            fp.close()

    def print_preamble(self, fp):
        """Write the \\begin{deluxetable} line and table-level options."""
        cols = "".join(self.justs)
        fp.write("\\begin{deluxetable}{%s}\n" % cols)
        if self.fontsize: fp.write("\\tabletypesize{%s}\n" % str(self.fontsize))
        if self.rotate: fp.write("\\rotate\n")
        if self.tablewidth is not None:
            fp.write("\\tablewidth{%s}\n" % str(self.tablewidth))
        else:
            fp.write("\\tablewidth{0pc}\n")
        if self.tablenum: fp.write("\\tablenum{%s}\n" % str(self.tablenum))
        fp.write("\\tablecolumns{%d}\n" % self.numcols)
        if self.caption:
            if self.label:
                lab = "\\label{%s}" % (self.label)
            fp.write("\\tablecaption{%s}\n" % (str(self.caption)+lab))

    def print_header(self,fp):
        """Write the \\tablehead block, one line per added header row."""
        fp.write("\\tablehead{\n")

        for i,headers in enumerate(self.headers):
            end = ['\\\\\n',''][i == len(self.headers)-1]
            for j,header in enumerate(headers):
                sep = [end,'&'][j < len(headers)-1]
                # A 2-tuple entry in header_ids marks a multicolumn span.
                if len(numpy.shape(self.header_ids[i][j])) == 1:
                    length = self.header_ids[i][j][1] - self.header_ids[i][j][0] + 1
                    fp.write("\\multicolumn{%d}{c}{%s} %s " % (length, header,sep))
                else:
                    fp.write("\\colhead{%s} %s " % (header,sep))
        fp.write("}\n")

    def print_data(self,fp):
        """Write the \\startdata ... \\enddata body, one section per add_data."""
        fp.write("\\startdata\n")

        for i,data in enumerate(self.data):
            if self.data_labels[i] != '':
                # Fix: use this section's own label type; the original always
                # consulted data_label_types[0].
                if self.data_label_types[i] == "cutin":
                    fp.write("\\cutinhead{%s}\n" % self.data_labels[i])
                else:
                    fp.write("\\sidehead{%s}\n" % self.data_labels[i])

            rows = []
            for j in range(numpy.shape(data[0])[0]):
                rows.append([])

                for k in range(len(data)):
                    sf = self.sigfigs[i][k]
                    if len(numpy.shape(data[k])) == 1:
                        if type(data[k][j]) in float_types:
                            if numpy.isnan(data[k][j]):
                                rows[-1].append('\\ldots')
                            else:
                                # sigfigs gives the number of decimals here,
                                # e.g. sf=2 formats 1.5 as "1.50". (The
                                # original built the same format string and
                                # passed it through eval(); plain %-formatting
                                # is equivalent and avoids eval.)
                                roundstr = '%.{}f'.format(sf)
                                rows[-1].append(roundstr % data[k][j])
                        else:
                            rows[-1].append(str(data[k][j]))
                    else:
                        # (value, uncertainty) pair: print with parenthetical
                        # error via sigfig. A stray debug print() was removed.
                        if numpy.isnan(data[k][j][0]):
                            val = "\\ldots"
                        else:
                            val = sigfig.round_sig_error(data[k][j][0],data[k][j][1],sf,
                                paren=True)
                        rows[-1].append(val)

            for row in rows:
                fp.write(" & ".join(row))
                fp.write("\\\\\n")
        fp.write("\\enddata\n")

    def print_footer(self, fp):
        """Write \\tablecomments (if any) and close the deluxetable."""
        if self.notes:
            fp.write("\\tablecomments{%s}\n" % (str(self.notes)))
        fp.write("\\end{deluxetable}\n")
| [
"numpy.shape",
"numpy.isnan"
] | [((4405, 4425), 'numpy.shape', 'numpy.shape', (['data[0]'], {}), '(data[0])\n', (4416, 4425), False, 'import numpy\n'), ((4624, 4644), 'numpy.shape', 'numpy.shape', (['sigfigs'], {}), '(sigfigs)\n', (4635, 4644), False, 'import numpy\n'), ((4296, 4314), 'numpy.shape', 'numpy.shape', (['datum'], {}), '(datum)\n', (4307, 4314), False, 'import numpy\n'), ((4470, 4488), 'numpy.shape', 'numpy.shape', (['datum'], {}), '(datum)\n', (4481, 4488), False, 'import numpy\n'), ((4749, 4769), 'numpy.shape', 'numpy.shape', (['sigfigs'], {}), '(sigfigs)\n', (4760, 4769), False, 'import numpy\n'), ((7198, 7218), 'numpy.shape', 'numpy.shape', (['data[0]'], {}), '(data[0])\n', (7209, 7218), False, 'import numpy\n'), ((6487, 6521), 'numpy.shape', 'numpy.shape', (['self.header_ids[i][j]'], {}), '(self.header_ids[i][j])\n', (6498, 6521), False, 'import numpy\n'), ((8125, 8151), 'numpy.isnan', 'numpy.isnan', (['data[k][j][0]'], {}), '(data[k][j][0])\n', (8136, 8151), False, 'import numpy\n'), ((7352, 7372), 'numpy.shape', 'numpy.shape', (['data[k]'], {}), '(data[k])\n', (7363, 7372), False, 'import numpy\n'), ((7458, 7481), 'numpy.isnan', 'numpy.isnan', (['data[k][j]'], {}), '(data[k][j])\n', (7469, 7481), False, 'import numpy\n')] |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import functools
import imghdr
import sys
import time
from random import randint
import cv2 as cv
import numpy as np
def strtime(millsec, form="%i:%02i:%06.3f"):
    """
    Format a duration given in milliseconds as a clock string.

    Args:
        millsec(int): Number of milliseconds to format
        form(str): printf-style format; its count of "%" fields (1-4)
            selects seconds / min+sec / h+m+s / d+h+m+s.
    Returns:
        (string)Formatted string
    """
    fc = form.count("%")
    # Split days, hours and minutes off the *running remainder*.  The
    # original divided the full input each time, so e.g. 1h01m01s was
    # rendered as "1:61:01".
    days, milliseconds = divmod(millsec, 86400000)
    hours, milliseconds = divmod(milliseconds, 3600000)
    minutes, milliseconds = divmod(milliseconds, 60000)
    seconds = float(milliseconds) / 1000
    var = {1: (seconds), 2: (minutes, seconds), 3: (hours, minutes, seconds),
           4: (days, hours, minutes, seconds)}
    return form % var[fc]
def timeit(func):
    """
    Decorator that reports how long each call of *func* takes.

    Args:
        func(*function): Function to meassure
    Returns:
        (*function) New wrapped function with meassurment
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        began = time.time()
        result = func(*args, **kwargs)
        took = strtime((time.time() - began) * 1000)
        print("function [{}] finished in {}".format(func.__name__, took))
        return result
    return wrapper
class BoardSearcher:
    # Last board contour successfully detected; reused on frames where no
    # 4-point contour is found.
    saved_cnt = None
    # Most recently written slide image / its mask (lazy-loaded from disk
    # in check_change when not yet set).
    last_saved_slide = None
    last_saved_slide_mask = None
    # Hash of the last saved slide, and of the last examined frame
    # (the latter is promoted to the former on save in write_image).
    last_saved_hash = None
    last_hash = None
    # Minimum similarity ratio at which a frame counts as "same slide".
    similarity = 0.60
    # Bound hash method chosen by __func_keyword_to_function__
    # (None selects the ORB feature-matching path in check_change).
    hash_function = None
    debug = True
    # Dominant board colour (BGR); overwritten per frame by preprocesing().
    main_col = (110, 255, 110)
    # Number of stitching-grid cells along x and y.
    grid_size = (8, 8)
    # Per-cell [image, seen-count] bookkeeping for the stitching grid.
    stored_section = None
    last_saved_section = None
    # Seen-count levels at which a grid cell is accepted / rejected when
    # stitching the final slide (see stitch_board).
    section_threshold = 15
    reject_threshold = 10
    # Pixel padding added around each grid cell.
    section_overlap = 10
    debug_image = None
def __init__(self, n_slides=0,
frame_counter=0,
save_interval=30,
similarity=0.60,
compare_function='phash',
section_overlap=10,
grid_size=(8, 8),
debug=True):
self.debug = debug
self.width = None
self.height = None
self.saved_cnt = None
self.seed = None
self.number_of_slides = n_slides
self.frame_counter = frame_counter
# number of frames which have to pass to run again check if
# current frame is similar to the last saved slide
self.save_interval = save_interval
# ratio of similarity between images based on which we decide if we
# are going to save an image
self.similarity = similarity
self.__func_keyword_to_function__(compare_function)
self.load_config()
self.section_threshold = save_interval-1
self.reject_threshold = round(0.5*save_interval)
self.section_overlap = section_overlap
self.grid_size = grid_size
self.stored_section = [[[None, None]
for x in range(grid_size[0])]
for x in range(grid_size[1])]
self.last_saved_section = [[[None, None]
for x in range(grid_size[0])]
for x in range(grid_size[1])]
def __func_keyword_to_function__(self, keyword):
switcher = {
'dhash': '__compute_dhash__',
'phash': '__compute_phash__',
'ahash': '__compute_ahash__',
'orb': None
}
compare_function = switcher.get(keyword, '__compute_dhash__')
self.hash_function = getattr(self, compare_function)
def load_config(self):
"""
Loads hsv values for image tresholding from config.
"""
with open('config.tsv') as f:
out = f.read().strip().split('\t')
if len(out) == 2:
self.lo, self.hi = map(int, out)
def save_config(self):
"""
Saves trackbar tresholding hsv values to config file.
"""
with open('config.tsv', 'w') as f:
f.write('\t'.join(map(str, [self.lo, self.hi])))
def trackbar_callback(self, x):
"""
Callback for trackbars.
Stores values of current state of trackbars.
"""
self.save_config()
    def find_board(self, image):
        """
        Finds a board by calling openCV function to find contures in image.
        Then it sorts those contures and keeps the ten biggest.
        We go over those contures and keep the first 4-point approximation
        that is large enough to be the board.
        Args:
            image(numpy.ndarray): Binary mask to find contures in
        Returns:
            Found conture in given image, or None
        """
        im = image.copy()
        # Dilate to close gaps in the mask before contour extraction.
        im = cv.dilate(im, ((5, 5)), iterations=8)
        (cnts, _) = cv.findContours(im,
                                    cv.RETR_TREE,
                                    cv.CHAIN_APPROX_SIMPLE)
        # Only the ten largest contours are candidates.
        cnts = sorted(cnts, key=cv.contourArea, reverse=True)[:10]
        our_cnt = None
        for c in cnts:
            peri = cv.arcLength(c, True)
            # Coarse polygon approximation (10% of perimeter tolerance).
            approx = cv.approxPolyDP(c, 0.1 * peri, True)
            if len(approx) == 4:
                # The board needs to be at least 1/3 of the image size
                # NOTE(review): both components derive from self.height
                # (1/3.0 and 1/3.9) — presumably intentional asymmetry,
                # but width is never used; confirm.
                min_size = np.array([self.height * 1/3.0, self.height * 1/3.9])
                # Compare both diagonals of the quadrilateral to min_size.
                a = np.abs(approx[0] - approx[2])[0] > min_size
                b = np.abs(approx[1] - approx[3])[0] > min_size
                true = [True, True]
                if np.array_equal(a, true) or np.array_equal(b, true):
                    our_cnt = approx
                    break
        return our_cnt
    def get_seed(self, image):
        """
        Tries to find good seed for flood fill algorithm by thresholding
        the image with main color. Then randomly picking one point inside
        the largest resulting blob.
        Args:
            image(numpy.ndarray): Image to search in
        Returns:
            Coordinates of seed position
        """
        # Reuse the cached seed while we still have a valid board contour.
        if self.seed is not None and self.saved_cnt is not None:
            return self.seed
        im = image.copy()
        # Threshold around the dominant board colour (50%..100% range).
        blobs = cv.inRange(im, (0.5*self.main_col[0],
                                0.5*self.main_col[1],
                                0.5*self.main_col[2]),
                           (self.main_col[0],
                            self.main_col[1],
                            self.main_col[2]))
        (cnts, _) = cv.findContours(blobs, cv.RETR_EXTERNAL,
                                    cv.CHAIN_APPROX_SIMPLE)
        # Pick a random contour point of the biggest blob as the seed.
        cnt = sorted(cnts, key=cv.contourArea, reverse=True)[0]
        self.seed = tuple(cnt[randint(0, len(cnt)-1)][0])
        if self.debug is True:
            cv.circle(im, (self.seed[0], self.seed[1]), 5, (0, 0, 255), -1)
            cv.imshow("seed", im)
        return self.seed
    def get_mask(self, image):
        """
        Returns mask of image. We use floodfill algorithm that
        compares 4 neighboring pixels and based on specified
        threashold fills mask image that is bigger by two pixels
        in every direction with white color and than we remove
        left noise by running dilation
        Args:
            image(numpy.ndarray): Preprocessed image
        Returns:
            Mask of given image
        """
        h, w = image.shape[:2]
        # floodFill requires a mask 2 px larger than the image.
        mask = np.zeros((h+2, w+2), np.uint8)
        connectivity = 4
        mask[:] = 0
        if self.debug is True:
            # In debug mode the thresholds come live from the trackbars.
            self.lo = cv.getTrackbarPos('lo', 'result')
            self.hi = cv.getTrackbarPos('hi', 'result')
        flags = connectivity
        # Only write into the mask, painting filled pixels as 255.
        flags |= cv.FLOODFILL_MASK_ONLY
        flags |= 255 << 8
        self.seed = self.get_seed(image)
        cv.floodFill(image, mask, self.seed, (255, 255, 255), (self.lo,)*3,
                     (self.hi,)*3, flags)
        # Slight dilation to clean up remaining speckle noise.
        kernel = np.ones((1, 1), np.uint8)
        mask = cv.dilate(mask, kernel, iterations=4)
        return mask
def create_trackbars(self):
"""
Creates window for displaying trackbars and mask of image
"""
# create window
cv.namedWindow('result')
cv.createTrackbar('lo', 'result', self.lo, 255, self.trackbar_callback)
cv.createTrackbar('hi', 'result', self.hi, 255, self.trackbar_callback)
def __hamming_distance__(self, hash1, hash2):
if(len(hash1) != len(hash2)):
return 0
return sum(map(lambda x: 0 if x[0] == x[1] else 1, zip(hash1, hash2)))
def __check_change_orb__(self, image, last_image):
"""
Computes ORB fetures on last saved slide and given image.
Tries to match features of these images and calculates ratio
of matched features with all found features in last saved slide
Args:
image(numpy.ndarray): Image from which to get features
last_image(numpy.ndarray): Image which we want to compare
Returns:
float: Similararity between last saved image and given image
"""
orb = cv.ORB()
kp1, ds1 = orb.detectAndCompute(self.last_image, None)
kp2, ds2 = orb.detectAndCompute(image, None)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matches = bf.match(ds1, ds2)
return float(len(matches))/len(kp1)
    def __compute_ahash__(self, image):
        """
        Computes aHash. Implemantation based on
        http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html
        Args:
            image(numpy.ndarray): Image from which to compute the hash
        Returns:
            numpy.ndarray: flattened binary array with computed aHash
        """
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # Shrink to 8x8, then hash each pixel against the mean intensity.
        resized = cv.resize(gray, (8, 8), interpolation=cv.INTER_AREA)
        mean = cv.mean(resized)[0]
        ret = resized > mean
        return ret.flatten()
    def __compute_phash__(self, image):
        """
        Computes pHash based on discrete cosine transformation.
        Implemantation based on
        http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html
        Args:
            image(numpy.ndarray): Image from which to compute the hash
        Returns:
            numpy.ndarray: flattened binary array with computed pHash
        """
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        resized = cv.resize(gray, (32, 32), interpolation=cv.INTER_AREA)
        dct = cv.dct(np.float32(resized))
        # NOTE(review): np.uint8 wraps negative DCT coefficients; the
        # canonical pHash keeps floats here. Changing it would alter every
        # stored hash, so it is left as-is — confirm intent.
        dct = np.uint8(dct)
        # Keep only the low-frequency 8x8 corner and hash against its median.
        dct_low_freq = dct[:8, :8]
        med = np.median(dct_low_freq)
        ret = dct_low_freq > med
        return ret.flatten()
    def __compute_dhash__(self, image):
        """
        Computes dHash. Implemantation based on
        http://www.hackerfactor.com/blog/index.php?/archives/529-Kind-of-Like-That.html
        Args:
            image(numpy.ndarray): Image from which to compute the hash
        Returns:
            numpy.ndarray: flattened binary array with computed dHash
        """
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # 9x8 so each row yields 8 left-vs-right gradient comparisons.
        resized = cv.resize(gray, (9, 8), interpolation=cv.INTER_AREA)
        ret = resized[:, 1:] > resized[:, :-1]
        return ret.flatten()
    def __compare_hashes__(self, image, compare_image=None):
        """
        Generate hashes of last saved slide and given image and
        computes hamming distance between hashes
        Args:
            image(numpy.ndarray): Image from which to compute the hash and
                                  compare to last saved slide hash
            compare_image(numpy.ndarray): Optional explicit image to compare
                                          against instead of the cached slide
        Returns:
            float: Ratio between hamming distance of hash and length of
                   the hash
        """
        if compare_image is None:
            # Cache the saved-slide hash so it is computed only once per
            # saved slide.
            if self.last_saved_hash is None:
                tmp_hash = self.hash_function(self.last_saved_slide)
                self.last_saved_hash = tmp_hash
            # Side effect: last_hash is promoted to last_saved_hash by
            # write_image when this frame ends up being saved.
            self.last_hash = self.hash_function(image)
            new_hash = self.last_hash
            old_hash = self.last_saved_hash
        else:
            new_hash = self.hash_function(image)
            old_hash = self.hash_function(compare_image)
        hamming = self.__hamming_distance__(new_hash, old_hash)
        hash_len = len(new_hash)
        return float((hash_len - hamming))/hash_len
    def check_change(self, image, mask):
        """
        Loads last slide and last image mask and perfroms bitwise &
        between masks of current and last image and then applies
        this new mask to old and current image. Then calls similarity check
        function. If returned value is more than set similarity then the
        pictures are almost the same and we don't have to save else we
        save new slide.
        Args:
            image(numpy.ndarray): Image to check
            mask(numpy.ndarray): Mask of given image
        Returns:
            bool: True if the image should be saved (changed). False otherwise
        """
        if self.last_saved_slide is None or self.last_saved_slide_mask is None:
            # Lazily restore the previous run's slide/mask from disk.
            last_slide_name = "slide{}.png".format(self.number_of_slides)
            self.last_saved_slide = cv.imread(last_slide_name,
                                              cv.CV_LOAD_IMAGE_COLOR)
            self.last_saved_slide_mask = cv.imread("mask.png",
                                                   cv.CV_LOAD_IMAGE_GRAYSCALE)
        # Nothing to compare against -> treat as changed, save.
        if self.last_saved_slide_mask is None or self.last_saved_slide is None:
            return True
        if mask.shape != self.last_saved_slide_mask.shape:
            return True
        # Compare only the area visible in BOTH masks.
        prepared_mask = cv.bitwise_and(self.last_saved_slide_mask, mask)
        last_saved_masked_slide = cv.bitwise_and(self.last_saved_slide,
                                                 self.last_saved_slide,
                                                 mask=prepared_mask)
        check_image = cv.bitwise_and(image, image, mask=prepared_mask)
        # hash_function is None when the 'orb' comparison was selected.
        if self.hash_function is None:
            val = self.__check_change_orb__(check_image,
                                            last_saved_masked_slide)
        else:
            val = self.__compare_hashes__(check_image,
                                          last_saved_masked_slide)
        if(val > self.similarity):
            return False
        return True
def get_sorted_rectangle(self, cnt):
"""
Tries to determine which corner is which based on
given conture and then sorts them in correct order so
we can use it latter to shift perspective of image
Args:
cnt(numpy.ndarray): Contures of a board
Returns:
Corectly sorted conture of a board
"""
pts = cnt.reshape(4, 2)
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
    def get_cropped_image(self, rect, image, mask):
        """
        Tries to crop the table from image and warps its perspective
        so we can get table image as if we are standing in front of it
        Args:
            rect(numpy.ndarray): Ordered corners of a board (tl, tr, br, bl)
            image(numpy.ndarray): Image to shift and crop from
            mask(numpy.ndarray): Mask to shift
        Returns:
            Shifted perspective of croped table and its mask for
            further processing and checking.
        """
        (tl, tr, br, bl) = rect
        # Output size: the longer of the two opposite edges, per axis.
        width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        max_width = max(int(width_a), int(width_b))
        max_height = max(int(height_a), int(height_b))
        # Destination rectangle in the same tl, tr, br, bl corner order.
        dst = np.array([
            [0, 0],
            [max_width - 1, 0],
            [max_width - 1, max_height - 1],
            [0, max_height - 1]],
            dtype="float32")
        warp_mat = cv.getPerspectiveTransform(rect, dst)
        warp = cv.warpPerspective(image, warp_mat, (max_width, max_height))
        warp_mask = cv.warpPerspective(mask, warp_mat, (max_width, max_height))
        return (warp, warp_mask)
    def write_image(self, cnt, board, mask):
        """
        Handles writing images to the disk. Stitches parts of the board
        together based on last image and current proccesed one
        then it performs basic check how much is this image different from
        previous one. Based on results decides to write or not
        Args:
            cnt(numpy.ndarray): Contures of a board (None skips the write)
            board(numpy.ndarray): Cropped board image to compare and write
            mask(numpy.ndarray): Mask of same areas of compared images
        """
        if cnt is None:
            return
        stitch = self.stitch_board(board)
        if self.debug is False:
            # Non-debug mode still shows a progress overlay window.
            output = stitch.copy()
            cv.putText(output,
                       "Created slides: {}".format(self.number_of_slides),
                       (output.shape[1] - 200, 50),
                       cv.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
            cv.imshow("output", output)
        if self.check_change(stitch, mask) is True:
            cv.imwrite("slide{0}.png".format(self.number_of_slides), stitch)
            cv.imwrite("mask.png", mask)
            # Promote the just-written slide to "last saved" state.
            self.last_saved_slide = stitch
            self.last_saved_slide_mask = mask
            self.last_saved_hash = self.last_hash
            self.last_saved_section = self.stored_section
            self.number_of_slides += 1
            # Reset per-cell accumulation for the next slide.
            for x in range(self.grid_size[0]):
                for y in range(self.grid_size[1]):
                    self.stored_section[x][y] = [None, None]
    def find_and_draw_edges(self, image, origin):
        """
        Extract mask of the image. Based on this mask calls function
        to find main area where board is located in the image.
        Calls function find_board which returns conture of a board.
        Draws conture of board, crops it out and feeds it to the
        splitting/saving pipeline.
        Args:
            image(numpy.ndarray): Preprocessed image
            origin(numpy.ndarray): Original loaded image
        """
        mask = self.get_mask(image)
        if self.debug:
            cv.imshow('mask', mask)
        our_cnt = self.find_board(mask)
        # Fall back to the last known board contour when none is found.
        if our_cnt is None:
            our_cnt = self.saved_cnt
        if self.saved_cnt is None:
            self.saved_cnt = our_cnt
        img = origin.copy()
        # NOTE(review): drawContours runs before the None check below, so
        # the very first frames (no contour yet) pass [None] here — confirm
        # this is tolerated by the cv version in use.
        cv.drawContours(img, [our_cnt], -1, (0, 0, 255), 3)
        if our_cnt is None:
            return
        rect = self.get_sorted_rectangle(our_cnt)
        warp, warp_mask = self.get_cropped_image(rect, img, mask)
        self.split_board(warp, warp_mask)
        # Only attempt a save once every save_interval frames.
        if(self.frame_counter == 0):
            self.write_image(our_cnt, warp, warp_mask)
        self.frame_counter = (self.frame_counter + 1) % self.save_interval
        if self.debug:
            cv.imshow("final image", img)
    def preprocesing(self, image):
        """
        Makes a copy of input image then makes a threasholding operation
        so we can get mask of dominant color areas. Then applies
        that mask to every channel of output image.
        (Name keeps its historical spelling; callers depend on it.)
        Args:
            image(numpy.ndarray): Image to process
        Returns:
            Image with boosted dominant-colour areas
        """
        im = image.copy()
        # Dominant colour = per-channel mean of the whole frame (BGR).
        self.main_col = cv.mean(im)[:3]
        c_boost = cv.inRange(image, (0.25*self.main_col[0],
                                   0.25*self.main_col[1],
                                   0.25*self.main_col[2]),
                           (1.5*self.main_col[0],
                            2.5*self.main_col[1],
                            1.5*self.main_col[2]))
        # Zero out everything outside the dominant-colour range.
        im[:, :, 0] = cv.bitwise_and(image[:, :, 0], c_boost)
        im[:, :, 1] = cv.bitwise_and(image[:, :, 1], c_boost)
        im[:, :, 2] = cv.bitwise_and(image[:, :, 2], c_boost)
        if self.debug:
            cv.imshow("preprocesing", im)
            # NOTE(review): writes a PNG on every processed frame in debug.
            cv.imwrite("preprocesing.png", im)
        return im
    def __get_occlusion_mask__(self, board):
        """
        Function tries to extract mask of object in front of the table
        by diff-ing last saved slide against current image and then thresholds
        this to get mask of image. After this we dilate image to get rid of
        small blobs.
        Args:
            board(numpy.ndarray): Image of the croped board
        Returns:
            (numpy.ndarray): Mask of objects in foreground
        """
        # Nothing saved yet -> nothing can be occluded.
        if self.last_saved_slide is None:
            return np.zeros(board.shape[:2], dtype="uint8")
        height, width = board.shape[:2]
        # Blobs bigger than ~1/9 of the board count as occluding objects.
        size = height/3 * width/3
        gray_old = cv.cvtColor(self.last_saved_slide, cv.COLOR_BGR2GRAY)
        gray_new = cv.cvtColor(board, cv.COLOR_BGR2GRAY)
        # Bring both frames to the same (smaller) size before diffing.
        if gray_old.shape > gray_new.shape:
            gray_old = cv.resize(gray_old, (gray_new.shape[1],
                                             gray_new.shape[0]))
        elif gray_old.shape < gray_new.shape:
            gray_new = cv.resize(gray_new, (gray_old.shape[1],
                                             gray_old.shape[0]))
        frame_delta = cv.absdiff(gray_new, gray_old)
        thresh = cv.threshold(frame_delta, 50, 255, cv.THRESH_BINARY)[1]
        inter = cv.dilate(thresh, None, iterations=2)
        (cnts, _) = cv.findContours(inter,
                                    cv.RETR_TREE,
                                    cv.CHAIN_APPROX_SIMPLE)
        # Fill large blobs solid white, erase the small ones.
        for c in cnts:
            if cv.contourArea(c) > size:
                cv.drawContours(inter, [c], 0, 255, -1)
            else:
                cv.drawContours(inter, [c], 0, 0, -1)
        return inter
def __get_section_boundary__(self, x, y, section_size, width, height):
"""
Functions returns based on given arguments boundary of individual
section.
Args:
x(int): number of section on x axis
y(int): number of section on y axis
section_size(tuple): width and height of a section
width(int): width of table
height(int): height of table
Returns:
Coordiantes of cornes of a section
"""
y_first = y*section_size[1]-self.section_overlap
y_second = (y+1)*section_size[1]+self.section_overlap
x_first = x*section_size[0]-self.section_overlap
x_second = (x+1)*section_size[0]+self.section_overlap
if y_first < 0:
y_first = 0
if y_second > height:
y_second = height
if x_first < 0:
x_first = 0
if x_second > width:
x_second = width
return (y_first, y_second, x_first, x_second)
    def split_board(self, board, mask):
        """
        Function splits the board into sections that are then
        individually processed. It runs through every section and
        checks if it touches any occluding object if it does "seen"
        counter is not incresed. If it doesn't sections is stored in
        a list and "seen" counter is incresed
        Args:
            board(numpy.ndarray): Image of crop and rotated board
            mask(numpy.ndarray): Mask of crop and rotated board (unused here;
                kept for interface symmetry with stitch_board)
        """
        height, width = board.shape[:2]
        # NOTE(review): plain division — integer cell sizes under Python 2,
        # floats under Python 3 (file targets the cv2/py2 era API).
        section_size = (width/self.grid_size[0], height/self.grid_size[1])
        occlusion_mask = self.__get_occlusion_mask__(board)
        if self.debug:
            self.debug_image = board.copy()
        for x in range(self.grid_size[0]):
            for y in range(self.grid_size[1]):
                boundaries = self.__get_section_boundary__(x, y, section_size,
                                                           width, height)
                tmpimg = board[boundaries[0]:boundaries[1],
                               boundaries[2]:boundaries[3]]
                intersection = occlusion_mask[boundaries[0]:boundaries[1],
                                              boundaries[2]:boundaries[3]]
                # Non-zero pixels mean an occluding object touches the cell.
                count = cv.countNonZero(intersection)
                # First sighting: store the cell image with seen-count 0.
                if self.stored_section[x][y][0] is None:
                    self.stored_section[x][y][0] = tmpimg
                    self.stored_section[x][y][1] = 0
                if count <= 0:
                    self.stored_section[x][y][1] += 1
                else:
                    if self.debug:
                        # Mark occluded cells in red on the debug overlay.
                        cv.rectangle(self.debug_image, (x*section_size[0],
                                                        y*section_size[1]),
                                     ((x+1)*section_size[0],
                                      (y+1)*section_size[1]),
                                     (0, 0, 255), 1)
    def stitch_board(self, board):
        """
        Function takes sections of a board and based on given thresholding
        values tries to stitch them into one final image. If we seen
        section more than given section thresholding value we sow this
        section into final slide. If section is lower than section reject
        threshold we sow last good section into final image. If value is
        between these two thresholds we check how similar is this section
        to last good save one if they are similar last good one is sow if they
        are not we got some new information and new section is put into final
        slide
        Args:
            board(numpy.ndarray): Image of a board
        Returns:
            (numpy.ndarray): Final image of a board from stitched sections
        """
        height, width = board.shape[:2]
        section_size = (width/self.grid_size[0], height/self.grid_size[1])
        # Start from the last saved slide so unchanged cells carry over.
        if self.last_saved_slide is None:
            blank = np.zeros((height, width, 3), dtype="uint8")
        else:
            blank = self.last_saved_slide.copy()
        if self.debug:
            font_offset = 35
        for x in range(self.grid_size[0]):
            for y in range(self.grid_size[1]):
                seen = self.stored_section[x][y][1]
                section = self.stored_section[x][y][0]
                boundaries = self.__get_section_boundary__(x, y, section_size,
                                                           width, height)
                if self.last_saved_section[x][y][0] is not None:
                    last_good_section = self.last_saved_section[x][y][0]
                else:
                    # No history for this cell: accept the current section
                    # unconditionally and remember it as "last good".
                    blank[boundaries[0]:boundaries[1],
                          boundaries[2]:boundaries[3]] = section
                    self.last_saved_section[x][y][0] = section
                    if self.debug:
                        cv.putText(self.debug_image, "{}".format(seen),
                                   (boundaries[2]+font_offset,
                                    boundaries[0]+font_offset),
                                   cv.FONT_HERSHEY_COMPLEX, 0.5,
                                   (255, 0, 0), 1)
                    continue
                # Shape mismatch (edge cells after a resize): skip quietly.
                if blank[boundaries[0]:boundaries[1],
                         boundaries[2]:boundaries[3]].shape != section.shape:
                    continue
                if seen >= self.section_threshold:
                    # Seen long enough unoccluded -> trust the new content.
                    blank[boundaries[0]:boundaries[1],
                          boundaries[2]:boundaries[3]] = section
                    if self.debug:
                        cv.putText(self.debug_image, "{}".format(seen),
                                   (boundaries[2]+font_offset,
                                    boundaries[0]+font_offset),
                                   cv.FONT_HERSHEY_COMPLEX, 0.5,
                                   (0, 255, 0), 1)
                elif (seen < self.section_threshold and
                      seen > self.reject_threshold):
                    # Ambiguous zone: only take the new section when it
                    # differs enough from the last good one.
                    sim = self.__compare_hashes__(section, last_good_section)
                    if sim <= self.similarity:
                        blank[boundaries[0]:boundaries[1],
                              boundaries[2]:boundaries[3]] = section
                        if self.debug:
                            cv.putText(self.debug_image, "{}".format(seen),
                                       (boundaries[2]+font_offset,
                                        boundaries[0]+font_offset),
                                       cv.FONT_HERSHEY_COMPLEX, 0.5,
                                       (0, 0, 255), 1)
        if self.debug:
            cv.imshow("stitched image", blank)
            cv.imshow("board", self.debug_image)
        return blank
def get_board(self, image):
"""
Makes a copy of input image, applies median blur to cartoon-ify
image. It gets rid of noise and not wanted colors.
Calls function which will find a board and
draw edges of that board to original image
Args:
image(numpy.ndarray): Image to process
"""
if self.width is None or self.height is None:
self.height, self.width = image.shape[:2]
out = cv.medianBlur(image, 5)
out = self.preprocesing(out)
self.find_and_draw_edges(out, image)
def get_conture(self, image):
"""
Function copies the image and transforms color space of
this copied image to HSV. Then gets mask and calls main
funtion for finding the board
Args:
image(numpy.ndarray): Loaded image
Returns:
Conture of given image
"""
hsv_image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
mask = self.get_mask(hsv_image)
return self.find_board(mask)
    def process_image(self, image):
        """
        Main image search function: loads the file and re-renders the
        detection loop until ESC is pressed.
        Args:
            image(string): Path to image file to process
        """
        im = cv.imread(image, cv.CV_LOAD_IMAGE_COLOR)
        if(im is None):
            print("Can not open file.", file=sys.stderr)
            return
        if self.debug is True:
            self.create_trackbars()
        # Loop so the debug trackbars can be adjusted interactively;
        # ESC (27) exits.
        while(True):
            self.get_board(im)
            if cv.waitKey(1) & 0xFF == 27:
                break
    @timeit
    def process_video(self, video):
        """
        Main video search function: iterates the video frame by frame,
        running board detection on each; prints progress stats when not
        in debug mode. ESC (27) aborts.
        Args:
            video(string): Path to video file to process
        """
        if self.debug is False:
            # avgpf - average time it took to process frames in one second
            frame_counter, fps, old_frames, timer, avgpf = 0, 0, 0, 0, 0
        print("Press ESC to exit")
        vid = cv.VideoCapture(video)
        # Legacy OpenCV 2.x constant namespace (cv.cv.*).
        all_frames = int(vid.get(cv.cv.CV_CAP_PROP_FRAME_COUNT))
        if self.debug is True:
            self.create_trackbars()
        while(vid.isOpened()):
            ret, frame = vid.read()
            if ret is False:
                break
            if self.debug is False:
                # Time each frame to maintain the fps / avg-time stats.
                start_time = time.time()
                self.get_board(frame)
                elapsed_time = time.time() - start_time
                timer += elapsed_time
                if timer > 1:
                    fps = frame_counter - old_frames
                    avgpf = timer / fps
                    timer %= 1
                    old_frames = frame_counter
            else:
                self.get_board(frame)
            if cv.waitKey(1) & 0xFF == 27:
                break
            if self.debug is False:
                curr_pos = int(vid.get(cv.cv.CV_CAP_PROP_POS_MSEC))
                frame_counter += 1
                message = ("\rprocessing frame #: {0}/{1} | "
                           "current position: {2} | "
                           "processed fps: {3} | "
                           "avg proccess frame time: {4:.02f} ms"
                           ).format(frame_counter, all_frames,
                                    strtime(curr_pos, "%i:%02i:%02i"), fps,
                                    avgpf * 1000)
                # Keep an (empty) output window alive until the first
                # board contour is found.
                if self.saved_cnt is None:
                    im = np.ones((1, 1))
                    cv.imshow("output", im)
                print(message, end="")
        vid.release()
        if self.debug is False:
            print("\nNumber of created slides: {}"
                  .format(self.number_of_slides))
def start_processing(self, input_file):
"""
Main function to determine if input file is a video or image
and start processing the file accordingly
Args:
input_file(string): Path to input file
"""
try:
if (input_file.endswith(video_extension_list)):
self.process_video(input_file)
elif (imghdr.what is not None):
self.process_image(input_file)
else:
print("Unrecognized file format", file=sys.stderr)
cv.destroyAllWindows()
except IOError:
print("Wrong file or path to file", file=sys.stderr)
# File extensions routed to video processing by start_processing().
video_extension_list = ("mkv", "wmv", "avi", "mp4")
def main(input_file, slide_number=0, start_frame=0, check_interval=30,
         sim=0.60, compare_func='dhash', overlap=10, grid=(16, 16), dbg=True):
    """Run one shared BoardSearcher over every path in *input_file*.

    Args mirror the BoardSearcher constructor parameters.
    """
    searcher = BoardSearcher(n_slides=slide_number, frame_counter=start_frame,
                             save_interval=check_interval, similarity=sim,
                             compare_function=compare_func,
                             section_overlap=overlap, grid_size=grid,
                             debug=dbg)
    for path in input_file:
        searcher.start_processing(path)
if __name__ == '__main__':
    desc = '''
    board2slides - Extracts notes as slides from educational
    (whiteboard/blackboard) videos.
    '''
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('filename', nargs='+', metavar='filename',
                        help='List of videos or images to process.')
    parser.add_argument('-n', '--slide-number', default=0,
                        type=int, metavar='N', dest='slide_number',
                        help='''
                        On which slide number to start. Slide with this
                        number will also be loaded. (default: 0)
                        ''')
    parser.add_argument('-i', '--save-interval', default=30,
                        type=int, metavar='I', dest='save_interval',
                        help='''
                        How many frames have to pass to perform check if
                        board changed and possibly save slide from video.
                        (default: 30)
                        ''')
    parser.add_argument('-s', '--similarity', default=0.60,
                        type=float, metavar='S', dest='similarity',
                        help='''
                        On have many percent frames have to be similar to
                        skip saving of slide. (default: 0.60)
                        ''')
    parser.add_argument('-f', '--start-frame', default=0,
                        type=int, metavar='F', dest='start_frame',
                        help='''
                        On which frame to start processing the video.
                        (default: 0)
                        ''')
    parser.add_argument('-c', '--compare-function', default='phash',
                        dest='cfunc',
                        choices=['dhash', 'phash', 'ahash', 'orb'],
                        help='''
                        Specify a compare function which is going to
                        be used to perform similarity chceck between
                        last saved slide and currently proccesed one.
                        (default: phash)
                        ''')
    # Grid sizes feed range() in BoardSearcher.__init__, so they must be
    # integers (the original declared type=float).
    parser.add_argument('--grid-width', default=16,
                        type=int, metavar='T', dest='grid_width',
                        help='''
                        How many sections we should split image of a board
                        along x axis. (default: 16)
                        ''')
    parser.add_argument('--section-overlap', default=10,
                        type=int, metavar='O', dest='sec_over',
                        help='''
                        How much such individual section overlap.
                        (default: 10)
                        ''')
    parser.add_argument('--grid-height', default=16,
                        type=int, metavar='T', dest='grid_height',
                        help='''
                        How many sections we should split image of a board
                        along y axis. (default: 16)
                        ''')
    # store_true ENABLES debugging; the previous help text claimed the
    # opposite ("Turns off debuging features").
    parser.add_argument('-d', '--debug', action='store_true', dest='debug',
                        help='''
                        Turns on debugging features. (default: turned OFF)
                        ''')
    args = parser.parse_args()
    main(args.filename, slide_number=args.slide_number,
         start_frame=args.start_frame, check_interval=args.save_interval,
         sim=args.similarity, compare_func=args.cfunc, overlap=args.sec_over,
         grid=(args.grid_width, args.grid_height), dbg=args.debug)
| [
"numpy.abs",
"argparse.ArgumentParser",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.getPerspectiveTransform",
"cv2.arcLength",
"cv2.approxPolyDP",
"numpy.argmax",
"numpy.ones",
"numpy.argmin",
"cv2.floodFill",
"cv2.rectangle",
"cv2.absdiff",
"cv2.imshow",
"cv2.inRange",
"cv2.warpPerspect... | [((1006, 1027), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1021, 1027), False, 'import functools\n'), ((33858, 33899), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (33881, 33899), False, 'import argparse\n'), ((1083, 1094), 'time.time', 'time.time', ([], {}), '()\n', (1092, 1094), False, 'import time\n'), ((4746, 4781), 'cv2.dilate', 'cv.dilate', (['im', '(5, 5)'], {'iterations': '(8)'}), '(im, (5, 5), iterations=8)\n', (4755, 4781), True, 'import cv2 as cv\n'), ((4804, 4861), 'cv2.findContours', 'cv.findContours', (['im', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(im, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (4819, 4861), True, 'import cv2 as cv\n'), ((6139, 6288), 'cv2.inRange', 'cv.inRange', (['im', '(0.5 * self.main_col[0], 0.5 * self.main_col[1], 0.5 * self.main_col[2])', '(self.main_col[0], self.main_col[1], self.main_col[2])'], {}), '(im, (0.5 * self.main_col[0], 0.5 * self.main_col[1], 0.5 * self.\n main_col[2]), (self.main_col[0], self.main_col[1], self.main_col[2]))\n', (6149, 6288), True, 'import cv2 as cv\n'), ((6457, 6521), 'cv2.findContours', 'cv.findContours', (['blobs', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(blobs, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (6472, 6521), True, 'import cv2 as cv\n'), ((7357, 7391), 'numpy.zeros', 'np.zeros', (['(h + 2, w + 2)', 'np.uint8'], {}), '((h + 2, w + 2), np.uint8)\n', (7365, 7391), True, 'import numpy as np\n'), ((7720, 7817), 'cv2.floodFill', 'cv.floodFill', (['image', 'mask', 'self.seed', '(255, 255, 255)', '((self.lo,) * 3)', '((self.hi,) * 3)', 'flags'], {}), '(image, mask, self.seed, (255, 255, 255), (self.lo,) * 3, (self\n .hi,) * 3, flags)\n', (7732, 7817), True, 'import cv2 as cv\n'), ((7847, 7872), 'numpy.ones', 'np.ones', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (7854, 7872), True, 'import numpy as np\n'), ((7888, 7925), 'cv2.dilate', 'cv.dilate', 
(['mask', 'kernel'], {'iterations': '(4)'}), '(mask, kernel, iterations=4)\n', (7897, 7925), True, 'import cv2 as cv\n'), ((8101, 8125), 'cv2.namedWindow', 'cv.namedWindow', (['"""result"""'], {}), "('result')\n", (8115, 8125), True, 'import cv2 as cv\n'), ((8134, 8205), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""lo"""', '"""result"""', 'self.lo', '(255)', 'self.trackbar_callback'], {}), "('lo', 'result', self.lo, 255, self.trackbar_callback)\n", (8151, 8205), True, 'import cv2 as cv\n'), ((8214, 8285), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""hi"""', '"""result"""', 'self.hi', '(255)', 'self.trackbar_callback'], {}), "('hi', 'result', self.hi, 255, self.trackbar_callback)\n", (8231, 8285), True, 'import cv2 as cv\n'), ((9019, 9027), 'cv2.ORB', 'cv.ORB', ([], {}), '()\n', (9025, 9027), True, 'import cv2 as cv\n'), ((9157, 9203), 'cv2.BFMatcher', 'cv.BFMatcher', (['cv.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv.NORM_HAMMING, crossCheck=True)\n', (9169, 9203), True, 'import cv2 as cv\n'), ((9664, 9701), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (9675, 9701), True, 'import cv2 as cv\n'), ((9720, 9772), 'cv2.resize', 'cv.resize', (['gray', '(8, 8)'], {'interpolation': 'cv.INTER_AREA'}), '(gray, (8, 8), interpolation=cv.INTER_AREA)\n', (9729, 9772), True, 'import cv2 as cv\n'), ((10293, 10330), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (10304, 10330), True, 'import cv2 as cv\n'), ((10349, 10403), 'cv2.resize', 'cv.resize', (['gray', '(32, 32)'], {'interpolation': 'cv.INTER_AREA'}), '(gray, (32, 32), interpolation=cv.INTER_AREA)\n', (10358, 10403), True, 'import cv2 as cv\n'), ((10460, 10473), 'numpy.uint8', 'np.uint8', (['dct'], {}), '(dct)\n', (10468, 10473), True, 'import numpy as np\n'), ((10523, 10546), 'numpy.median', 'np.median', (['dct_low_freq'], {}), '(dct_low_freq)\n', (10532, 10546), True, 'import numpy as np\n'), ((10992, 
11029), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (11003, 11029), True, 'import cv2 as cv\n'), ((11048, 11100), 'cv2.resize', 'cv.resize', (['gray', '(9, 8)'], {'interpolation': 'cv.INTER_AREA'}), '(gray, (9, 8), interpolation=cv.INTER_AREA)\n', (11057, 11100), True, 'import cv2 as cv\n'), ((13537, 13585), 'cv2.bitwise_and', 'cv.bitwise_and', (['self.last_saved_slide_mask', 'mask'], {}), '(self.last_saved_slide_mask, mask)\n', (13551, 13585), True, 'import cv2 as cv\n'), ((13620, 13705), 'cv2.bitwise_and', 'cv.bitwise_and', (['self.last_saved_slide', 'self.last_saved_slide'], {'mask': 'prepared_mask'}), '(self.last_saved_slide, self.last_saved_slide, mask=prepared_mask\n )\n', (13634, 13705), True, 'import cv2 as cv\n'), ((13821, 13869), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'prepared_mask'}), '(image, image, mask=prepared_mask)\n', (13835, 13869), True, 'import cv2 as cv\n'), ((14677, 14710), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (14685, 14710), True, 'import numpy as np\n'), ((14828, 14848), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (14835, 14848), True, 'import numpy as np\n'), ((15521, 15573), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (15528, 15573), True, 'import numpy as np\n'), ((15596, 15648), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (15603, 15648), True, 'import numpy as np\n'), ((15673, 15725), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (15680, 15725), True, 'import numpy as np\n'), ((15749, 15801), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] 
- bl[1]) ** 2)\n', (15756, 15801), True, 'import numpy as np\n'), ((15928, 16042), 'numpy.array', 'np.array', (['[[0, 0], [max_width - 1, 0], [max_width - 1, max_height - 1], [0, \n max_height - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [max_width - 1, 0], [max_width - 1, max_height - 1], [0, \n max_height - 1]], dtype='float32')\n", (15936, 16042), True, 'import numpy as np\n'), ((16119, 16156), 'cv2.getPerspectiveTransform', 'cv.getPerspectiveTransform', (['rect', 'dst'], {}), '(rect, dst)\n', (16145, 16156), True, 'import cv2 as cv\n'), ((16172, 16232), 'cv2.warpPerspective', 'cv.warpPerspective', (['image', 'warp_mat', '(max_width, max_height)'], {}), '(image, warp_mat, (max_width, max_height))\n', (16190, 16232), True, 'import cv2 as cv\n'), ((16253, 16312), 'cv2.warpPerspective', 'cv.warpPerspective', (['mask', 'warp_mat', '(max_width, max_height)'], {}), '(mask, warp_mat, (max_width, max_height))\n', (16271, 16312), True, 'import cv2 as cv\n'), ((18609, 18660), 'cv2.drawContours', 'cv.drawContours', (['img', '[our_cnt]', '(-1)', '(0, 0, 255)', '(3)'], {}), '(img, [our_cnt], -1, (0, 0, 255), 3)\n', (18624, 18660), True, 'import cv2 as cv\n'), ((19562, 19738), 'cv2.inRange', 'cv.inRange', (['image', '(0.25 * self.main_col[0], 0.25 * self.main_col[1], 0.25 * self.main_col[2])', '(1.5 * self.main_col[0], 2.5 * self.main_col[1], 1.5 * self.main_col[2])'], {}), '(image, (0.25 * self.main_col[0], 0.25 * self.main_col[1], 0.25 *\n self.main_col[2]), (1.5 * self.main_col[0], 2.5 * self.main_col[1], 1.5 *\n self.main_col[2]))\n', (19572, 19738), True, 'import cv2 as cv\n'), ((19925, 19964), 'cv2.bitwise_and', 'cv.bitwise_and', (['image[:, :, 0]', 'c_boost'], {}), '(image[:, :, 0], c_boost)\n', (19939, 19964), True, 'import cv2 as cv\n'), ((19987, 20026), 'cv2.bitwise_and', 'cv.bitwise_and', (['image[:, :, 1]', 'c_boost'], {}), '(image[:, :, 1], c_boost)\n', (20001, 20026), True, 'import cv2 as cv\n'), ((20049, 20088), 'cv2.bitwise_and', 'cv.bitwise_and', 
(['image[:, :, 2]', 'c_boost'], {}), '(image[:, :, 2], c_boost)\n', (20063, 20088), True, 'import cv2 as cv\n'), ((20884, 20937), 'cv2.cvtColor', 'cv.cvtColor', (['self.last_saved_slide', 'cv.COLOR_BGR2GRAY'], {}), '(self.last_saved_slide, cv.COLOR_BGR2GRAY)\n', (20895, 20937), True, 'import cv2 as cv\n'), ((20957, 20994), 'cv2.cvtColor', 'cv.cvtColor', (['board', 'cv.COLOR_BGR2GRAY'], {}), '(board, cv.COLOR_BGR2GRAY)\n', (20968, 20994), True, 'import cv2 as cv\n'), ((21361, 21391), 'cv2.absdiff', 'cv.absdiff', (['gray_new', 'gray_old'], {}), '(gray_new, gray_old)\n', (21371, 21391), True, 'import cv2 as cv\n'), ((21481, 21518), 'cv2.dilate', 'cv.dilate', (['thresh', 'None'], {'iterations': '(2)'}), '(thresh, None, iterations=2)\n', (21490, 21518), True, 'import cv2 as cv\n'), ((21539, 21599), 'cv2.findContours', 'cv.findContours', (['inter', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(inter, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (21554, 21599), True, 'import cv2 as cv\n'), ((29247, 29270), 'cv2.medianBlur', 'cv.medianBlur', (['image', '(5)'], {}), '(image, 5)\n', (29260, 29270), True, 'import cv2 as cv\n'), ((29714, 29750), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (29725, 29750), True, 'import cv2 as cv\n'), ((30009, 30049), 'cv2.imread', 'cv.imread', (['image', 'cv.CV_LOAD_IMAGE_COLOR'], {}), '(image, cv.CV_LOAD_IMAGE_COLOR)\n', (30018, 30049), True, 'import cv2 as cv\n'), ((30747, 30769), 'cv2.VideoCapture', 'cv.VideoCapture', (['video'], {}), '(video)\n', (30762, 30769), True, 'import cv2 as cv\n'), ((1154, 1165), 'time.time', 'time.time', ([], {}), '()\n', (1163, 1165), False, 'import time\n'), ((5066, 5087), 'cv2.arcLength', 'cv.arcLength', (['c', '(True)'], {}), '(c, True)\n', (5078, 5087), True, 'import cv2 as cv\n'), ((5109, 5145), 'cv2.approxPolyDP', 'cv.approxPolyDP', (['c', '(0.1 * peri)', '(True)'], {}), '(c, 0.1 * peri, True)\n', (5124, 5145), True, 'import cv2 as cv\n'), ((6723, 
6786), 'cv2.circle', 'cv.circle', (['im', '(self.seed[0], self.seed[1])', '(5)', '(0, 0, 255)', '(-1)'], {}), '(im, (self.seed[0], self.seed[1]), 5, (0, 0, 255), -1)\n', (6732, 6786), True, 'import cv2 as cv\n'), ((6799, 6820), 'cv2.imshow', 'cv.imshow', (['"""seed"""', 'im'], {}), "('seed', im)\n", (6808, 6820), True, 'import cv2 as cv\n'), ((7486, 7519), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""lo"""', '"""result"""'], {}), "('lo', 'result')\n", (7503, 7519), True, 'import cv2 as cv\n'), ((7542, 7575), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""hi"""', '"""result"""'], {}), "('hi', 'result')\n", (7559, 7575), True, 'import cv2 as cv\n'), ((9788, 9804), 'cv2.mean', 'cv.mean', (['resized'], {}), '(resized)\n', (9795, 9804), True, 'import cv2 as cv\n'), ((10425, 10444), 'numpy.float32', 'np.float32', (['resized'], {}), '(resized)\n', (10435, 10444), True, 'import numpy as np\n'), ((13084, 13134), 'cv2.imread', 'cv.imread', (['last_slide_name', 'cv.CV_LOAD_IMAGE_COLOR'], {}), '(last_slide_name, cv.CV_LOAD_IMAGE_COLOR)\n', (13093, 13134), True, 'import cv2 as cv\n'), ((13222, 13271), 'cv2.imread', 'cv.imread', (['"""mask.png"""', 'cv.CV_LOAD_IMAGE_GRAYSCALE'], {}), "('mask.png', cv.CV_LOAD_IMAGE_GRAYSCALE)\n", (13231, 13271), True, 'import cv2 as cv\n'), ((14762, 14774), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (14771, 14774), True, 'import numpy as np\n'), ((14798, 14810), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (14807, 14810), True, 'import numpy as np\n'), ((14871, 14886), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (14880, 14886), True, 'import numpy as np\n'), ((14910, 14925), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (14919, 14925), True, 'import numpy as np\n'), ((17284, 17311), 'cv2.imshow', 'cv.imshow', (['"""output"""', 'output'], {}), "('output', output)\n", (17293, 17311), True, 'import cv2 as cv\n'), ((17454, 17482), 'cv2.imwrite', 'cv.imwrite', (['"""mask.png"""', 'mask'], {}), "('mask.png', 
mask)\n", (17464, 17482), True, 'import cv2 as cv\n'), ((18369, 18392), 'cv2.imshow', 'cv.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (18378, 18392), True, 'import cv2 as cv\n'), ((19069, 19098), 'cv2.imshow', 'cv.imshow', (['"""final image"""', 'img'], {}), "('final image', img)\n", (19078, 19098), True, 'import cv2 as cv\n'), ((19528, 19539), 'cv2.mean', 'cv.mean', (['im'], {}), '(im)\n', (19535, 19539), True, 'import cv2 as cv\n'), ((20125, 20154), 'cv2.imshow', 'cv.imshow', (['"""preprocesing"""', 'im'], {}), "('preprocesing', im)\n", (20134, 20154), True, 'import cv2 as cv\n'), ((20167, 20201), 'cv2.imwrite', 'cv.imwrite', (['"""preprocesing.png"""', 'im'], {}), "('preprocesing.png', im)\n", (20177, 20201), True, 'import cv2 as cv\n'), ((20749, 20789), 'numpy.zeros', 'np.zeros', (['board.shape[:2]'], {'dtype': '"""uint8"""'}), "(board.shape[:2], dtype='uint8')\n", (20757, 20789), True, 'import numpy as np\n'), ((21062, 21121), 'cv2.resize', 'cv.resize', (['gray_old', '(gray_new.shape[1], gray_new.shape[0])'], {}), '(gray_old, (gray_new.shape[1], gray_new.shape[0]))\n', (21071, 21121), True, 'import cv2 as cv\n'), ((21409, 21461), 'cv2.threshold', 'cv.threshold', (['frame_delta', '(50)', '(255)', 'cv.THRESH_BINARY'], {}), '(frame_delta, 50, 255, cv.THRESH_BINARY)\n', (21421, 21461), True, 'import cv2 as cv\n'), ((26002, 26045), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': '"""uint8"""'}), "((height, width, 3), dtype='uint8')\n", (26010, 26045), True, 'import numpy as np\n'), ((28663, 28697), 'cv2.imshow', 'cv.imshow', (['"""stitched image"""', 'blank'], {}), "('stitched image', blank)\n", (28672, 28697), True, 'import cv2 as cv\n'), ((28710, 28746), 'cv2.imshow', 'cv.imshow', (['"""board"""', 'self.debug_image'], {}), "('board', self.debug_image)\n", (28719, 28746), True, 'import cv2 as cv\n'), ((32997, 33019), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (33017, 33019), True, 'import cv2 as cv\n'), ((5277, 
5333), 'numpy.array', 'np.array', (['[self.height * 1 / 3.0, self.height * 1 / 3.9]'], {}), '([self.height * 1 / 3.0, self.height * 1 / 3.9])\n', (5285, 5333), True, 'import numpy as np\n'), ((21235, 21294), 'cv2.resize', 'cv.resize', (['gray_new', '(gray_old.shape[1], gray_old.shape[0])'], {}), '(gray_new, (gray_old.shape[1], gray_old.shape[0]))\n', (21244, 21294), True, 'import cv2 as cv\n'), ((21710, 21727), 'cv2.contourArea', 'cv.contourArea', (['c'], {}), '(c)\n', (21724, 21727), True, 'import cv2 as cv\n'), ((21752, 21791), 'cv2.drawContours', 'cv.drawContours', (['inter', '[c]', '(0)', '(255)', '(-1)'], {}), '(inter, [c], 0, 255, -1)\n', (21767, 21791), True, 'import cv2 as cv\n'), ((21826, 21863), 'cv2.drawContours', 'cv.drawContours', (['inter', '[c]', '(0)', '(0)', '(-1)'], {}), '(inter, [c], 0, 0, -1)\n', (21841, 21863), True, 'import cv2 as cv\n'), ((24296, 24325), 'cv2.countNonZero', 'cv.countNonZero', (['intersection'], {}), '(intersection)\n', (24311, 24325), True, 'import cv2 as cv\n'), ((31085, 31096), 'time.time', 'time.time', ([], {}), '()\n', (31094, 31096), False, 'import time\n'), ((5514, 5537), 'numpy.array_equal', 'np.array_equal', (['a', 'true'], {}), '(a, true)\n', (5528, 5537), True, 'import numpy as np\n'), ((5541, 5564), 'numpy.array_equal', 'np.array_equal', (['b', 'true'], {}), '(b, true)\n', (5555, 5564), True, 'import numpy as np\n'), ((30284, 30297), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (30294, 30297), True, 'import cv2 as cv\n'), ((31166, 31177), 'time.time', 'time.time', ([], {}), '()\n', (31175, 31177), False, 'import time\n'), ((31501, 31514), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (31511, 31514), True, 'import cv2 as cv\n'), ((32181, 32196), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (32188, 32196), True, 'import numpy as np\n'), ((32217, 32240), 'cv2.imshow', 'cv.imshow', (['"""output"""', 'im'], {}), "('output', im)\n", (32226, 32240), True, 'import cv2 as cv\n'), ((5351, 5380), 
'numpy.abs', 'np.abs', (['(approx[0] - approx[2])'], {}), '(approx[0] - approx[2])\n', (5357, 5380), True, 'import numpy as np\n'), ((5415, 5444), 'numpy.abs', 'np.abs', (['(approx[1] - approx[3])'], {}), '(approx[1] - approx[3])\n', (5421, 5444), True, 'import numpy as np\n'), ((24660, 24810), 'cv2.rectangle', 'cv.rectangle', (['self.debug_image', '(x * section_size[0], y * section_size[1])', '((x + 1) * section_size[0], (y + 1) * section_size[1])', '(0, 0, 255)', '(1)'], {}), '(self.debug_image, (x * section_size[0], y * section_size[1]),\n ((x + 1) * section_size[0], (y + 1) * section_size[1]), (0, 0, 255), 1)\n', (24672, 24810), True, 'import cv2 as cv\n')] |
import pandas as pd
import numpy as np
from melusine.prepare_email.body_header_extraction import extract_last_body
from melusine.prepare_email.body_header_extraction import extract_body
from melusine.prepare_email.body_header_extraction import extract_header
# Fixture: a parsed two-message email thread in melusine's "structured body"
# format -- each message carries routing metadata ("meta") and a list of text
# segments tagged HELLO / BODY / GREETINGS / FOOTER.  Consumed by
# test_extract_last_body below.
structured_body = [
    {
        "meta": {"date": None, "from": None, "to": None},
        "structured_text": {
            "header": "demande document",
            "text": [
                {"part": "Bonjour. ", "tags": "HELLO"},
                {"part": "Je vous remercie pour le document", "tags": "BODY"},
                {"part": "Cordialement,", "tags": "GREETINGS"},
                {"part": "Mr Unknown", "tags": "BODY"},
            ],
        },
    },
    {
        "meta": {
            "date": " mar. 22 mai 2018 à 10:20",
            "from": " <<EMAIL>> ",
            "to": None,
        },
        "structured_text": {
            "header": "demande document",
            "text": [
                {"part": "Bonjour. ", "tags": "HELLO"},
                {
                    "part": "Merci de bien vouloir prendre connaissance du document ci-joint",
                    "tags": "BODY",
                },
                {"part": "Cordialement,", "tags": "GREETINGS"},
                {"part": "Votre mutuelle", "tags": "BODY"},
                {
                    "part": "La visualisation des fichiers PDF nécessite Adobe Reader.",
                    "tags": "FOOTER",
                },
            ],
        },
    },
]
def test_extract_last_body():
    """extract_last_body should return only the BODY text of the latest message."""
    frame = pd.DataFrame({"structured_body": [structured_body]})
    expected = pd.Series(["Je vous remercie pour le document "])
    actual = frame.apply(extract_last_body, axis=1)
    pd.testing.assert_series_equal(actual, expected)
# Fixture: a single structured message (same schema as one element of the
# thread fixture above) with one BODY segment, a GREETINGS/BODY sign-off and a
# FOOTER.  Consumed by test_extract_body and test_extract_header below.
message_dict = {
    "meta": {
        "date": " mar. 22 mai 2018 à 10:20",
        "from": " <<EMAIL>> ",
        "to": None,
    },
    "structured_text": {
        "header": "demande document",
        "text": [
            {"part": "Bonjour. ", "tags": "HELLO"},
            {
                "part": "Merci de bien vouloir prendre connaissance du document ci-joint",
                "tags": "BODY",
            },
            {"part": "Cordialement,", "tags": "GREETINGS"},
            {"part": "Votre mutuelle", "tags": "BODY"},
            {
                "part": "La visualisation des fichiers PDF nécessite Adobe Reader.",
                "tags": "FOOTER",
            },
        ],
    },
}
def test_extract_body():
    """extract_body should concatenate the BODY-tagged parts of one message."""
    expected = "Merci de bien vouloir prendre connaissance du document ci-joint "
    actual = extract_body(message_dict)
    np.testing.assert_string_equal(actual, expected)
def test_extract_header():
    """extract_header should return the message's header string unchanged."""
    expected = "demande document"
    actual = extract_header(message_dict)
    np.testing.assert_string_equal(actual, expected)
| [
"pandas.DataFrame",
"melusine.prepare_email.body_header_extraction.extract_body",
"melusine.prepare_email.body_header_extraction.extract_header",
"numpy.testing.assert_string_equal",
"pandas.Series",
"pandas.testing.assert_series_equal"
] | [((1557, 1609), 'pandas.DataFrame', 'pd.DataFrame', (["{'structured_body': [structured_body]}"], {}), "({'structured_body': [structured_body]})\n", (1569, 1609), True, 'import pandas as pd\n'), ((1627, 1676), 'pandas.Series', 'pd.Series', (["['Je vous remercie pour le document ']"], {}), "(['Je vous remercie pour le document '])\n", (1636, 1676), True, 'import pandas as pd\n'), ((1737, 1786), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'output_df'], {}), '(result, output_df)\n', (1767, 1786), True, 'import pandas as pd\n'), ((2645, 2669), 'melusine.prepare_email.body_header_extraction.extract_body', 'extract_body', (['input_dict'], {}), '(input_dict)\n', (2657, 2669), False, 'from melusine.prepare_email.body_header_extraction import extract_body\n'), ((2674, 2720), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['result', 'output'], {}), '(result, output)\n', (2704, 2720), True, 'import numpy as np\n'), ((2827, 2853), 'melusine.prepare_email.body_header_extraction.extract_header', 'extract_header', (['input_dict'], {}), '(input_dict)\n', (2841, 2853), False, 'from melusine.prepare_email.body_header_extraction import extract_header\n'), ((2858, 2904), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['result', 'output'], {}), '(result, output)\n', (2888, 2904), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Eval"""
import os
import time
import datetime
import glob
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank, get_group_size, release
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from src.utils.logging import get_logger
from src.utils.auto_mixed_precision import auto_mixed_precision
from src.utils.var_init import load_pretrain_model
from src.models import build_network
from src.py_dataset import TTA_classification_dataset
from src.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
class ParameterReduce(nn.Cell):
    """Cell that sums its input tensor across all devices with AllReduce."""

    def __init__(self):
        super(ParameterReduce, self).__init__()
        self.cast = P.Cast()
        self.reduce = P.AllReduce()

    def construct(self, x):
        # Multiplying by a float32 scalar one normalizes the dtype of the
        # operand before the collective sum.
        scaled = x * self.cast(F.scalar_to_array(1.0), mstype.float32)
        return self.reduce(scaled)
def set_parameters():
    """Populate rank/group-size, output directory and logger on the global config.

    Returns:
        The mutated global ``config`` object.
    """
    if config.run_distribute:
        # Distributed run: ask the communication layer for our rank / world size.
        init()
        config.rank = get_rank()
        config.group_size = get_group_size()
    else:
        # Single-device run.
        config.rank = 0
        config.group_size = 1
    # One timestamped output directory per invocation.
    run_stamp = datetime.datetime.now().strftime("%Y-%m-%d_time_%H_%M_%S")
    config.outputs_dir = os.path.join(config.log_path, run_stamp)
    config.logger = get_logger(config.outputs_dir, config.rank)
    return config
def get_top5_acc(top5_arg, gt_class):
    """Count samples whose ground-truth label appears in their top-5 predictions.

    Args:
        top5_arg: per-sample containers of the top-5 predicted class ids
            (iterated in lockstep with ``gt_class``).
        gt_class: per-sample ground-truth class ids.

    Returns:
        int: number of samples for which the ground truth is in the top 5.
    """
    # sum() over a generator of booleans replaces the manual counter loop;
    # bools count as 0/1, so the result is an int as before.
    return sum(gt in top5 for top5, gt in zip(top5_arg, gt_class))
def get_result(model, top1_correct, top5_correct, img_tot):
    """Aggregate top-1/top-5 correct counts and image totals across all ranks.

    In distributed mode each rank dumps its partial counts to ``/cache`` as
    ``.npy`` files keyed by rank and a model-derived tag, waits until every
    rank's files exist, then sums the per-rank values.  In single-device mode
    the local counts are returned as-is.

    Args:
        model: checkpoint path used to derive a per-model file tag.
        top1_correct: this rank's count of top-1 correct predictions.
        top5_correct: this rank's count of top-5 correct predictions.
        img_tot: this rank's number of evaluated images.

    Returns:
        np.ndarray of shape (3, 1): aggregated [[top1], [top5], [img_tot]].
    """
    results = [[top1_correct], [top5_correct], [img_tot]]
    config.logger.info("before results=%s", results)
    if config.run_distribute:
        # NOTE(review): despite the name, no hashing is done -- this is just
        # the checkpoint path with slashes stripped, used as a filename tag.
        model_md5 = model.replace("/", "")
        tmp_dir = "/cache"
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        # Publish this rank's partial counts for the other ranks to read.
        top1_correct_npy = "/cache/top1_rank_{}_{}.npy".format(config.rank, model_md5)
        top5_correct_npy = "/cache/top5_rank_{}_{}.npy".format(config.rank, model_md5)
        img_tot_npy = "/cache/img_tot_rank_{}_{}.npy".format(config.rank, model_md5)
        np.save(top1_correct_npy, top1_correct)
        np.save(top5_correct_npy, top5_correct)
        np.save(img_tot_npy, img_tot)
        # File-existence barrier: busy-wait (no sleep) until every rank has
        # written all three of its files.
        while True:
            rank_ok = True
            for other_rank in range(config.group_size):
                top1_correct_npy = "/cache/top1_rank_{}_{}.npy".format(
                    other_rank, model_md5
                )
                top5_correct_npy = "/cache/top5_rank_{}_{}.npy".format(
                    other_rank, model_md5
                )
                img_tot_npy = "/cache/img_tot_rank_{}_{}.npy".format(
                    other_rank, model_md5
                )
                if (
                    not os.path.exists(top1_correct_npy)
                    or not os.path.exists(top5_correct_npy)
                    or not os.path.exists(img_tot_npy)
                ):
                    rank_ok = False
            if rank_ok:
                break
        # All files present: accumulate every rank's partial counts.
        top1_correct_all = 0
        top5_correct_all = 0
        img_tot_all = 0
        for other_rank in range(config.group_size):
            top1_correct_npy = "/cache/top1_rank_{}_{}.npy".format(
                other_rank, model_md5
            )
            top5_correct_npy = "/cache/top5_rank_{}_{}.npy".format(
                other_rank, model_md5
            )
            img_tot_npy = "/cache/img_tot_rank_{}_{}.npy".format(other_rank, model_md5)
            top1_correct_all += np.load(top1_correct_npy)
            top5_correct_all += np.load(top5_correct_npy)
            img_tot_all += np.load(img_tot_npy)
        results = [[top1_correct_all], [top5_correct_all], [img_tot_all]]
        results = np.array(results)
    else:
        results = np.array(results)
    config.logger.info("after results=%s", results)
    return results
@moxing_wrapper()
def test():
    """Evaluate a three-model ensemble with test-time augmentation.

    Builds three networks, loads their checkpoints, runs each over two
    dataset views, averages the six logit tensors, and reports top-1/top-5
    accuracy (aggregated across ranks in distributed mode).
    """
    set_parameters()
    context.set_context(
        mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False
    )
    if os.getenv("DEVICE_ID", "not_set").isdigit():
        context.set_context(device_id=int(os.getenv("DEVICE_ID")))
    # init distributed
    if config.run_distribute:
        parallel_mode = ParallelMode.DATA_PARALLEL
        context.set_auto_parallel_context(
            parallel_mode=parallel_mode,
            device_num=config.group_size,
            gradients_mean=True,
        )
    config.logger.save_args(config)
    # network
    config.logger.important_info("start create network")
    # Hard-coded checkpoint paths for the three ensemble members.
    # NOTE(review): the first checkpoint is tagged "resnet50_bam" while the
    # network below is built as "resnet50_bam_wider" -- confirm they match.
    config.models = [
        "./exps/2021-11-11_time_22_45_19/ckpt_0_model_resnet50_bam/0_model_name_resnet50_bam-75_2672.ckpt",
        "./exps/2021-11-15_time_18_11_00/ckpt_0_model_inception_resnet_v2/0_model_name_inception_resnet_v2-75_2672.ckpt",
        "./exps/2021-11-11_time_22_45_19/ckpt_0_model_se_resnext50_wider/0_model_name_se_resnext50_wider-75_2672.ckpt",
    ]
    config.networks = [
        build_network("resnet50_bam_wider", config.num_classes),
        build_network("inception_resnet_v2_wider", config.num_classes),
        build_network("se_resnext50_wider", config.num_classes),
    ]
    # Two dataset views over the same eval data ("eval1" / "eval2" modes) --
    # presumably different test-time augmentations; see src.py_dataset.
    de1_dataset = TTA_classification_dataset(
        config.eval_data_path,
        image_size=config.image_size,
        per_batch_size=config.per_batch_size,
        max_epoch=1,
        rank=config.rank,
        group_size=config.group_size,
        mode="eval1",
        config=config,
    )
    de2_dataset = TTA_classification_dataset(
        config.eval_data_path,
        image_size=config.image_size,
        per_batch_size=config.per_batch_size,
        max_epoch=1,
        rank=config.rank,
        group_size=config.group_size,
        mode="eval2",
        config=config,
    )
    eval1_dataloader = de1_dataset.create_tuple_iterator(
        output_numpy=True, num_epochs=1
    )
    eval2_dataloader = de2_dataset.create_tuple_iterator(
        output_numpy=True, num_epochs=1
    )
    model1, model2, model3 = config.models
    network1, network2, network3 = config.networks
    load_pretrain_model(model1, network1, config)
    load_pretrain_model(model2, network2, config)
    load_pretrain_model(model3, network3, config)
    img_tot = 0
    top1_correct = 0
    top5_correct = 0
    # Ascend runs in float16; other backends use automatic mixed precision.
    if config.device_target == "Ascend":
        network1.to_float(mstype.float16)
        network2.to_float(mstype.float16)
        network3.to_float(mstype.float16)
    else:
        auto_mixed_precision(network1)
        auto_mixed_precision(network2)
        auto_mixed_precision(network3)
    network1.set_train(False)
    network2.set_train(False)
    network3.set_train(False)
    t_end = time.time()
    it = 0
    for (data1, gt_classes1), (data2, gt_classes2) in zip(
        eval1_dataloader, eval2_dataloader
    ):
        # Six forward passes: 3 networks x 2 augmented views of the batch.
        output1 = network1(Tensor(data1, mstype.float32))
        output2 = network2(Tensor(data1, mstype.float32))
        output3 = network3(Tensor(data1, mstype.float32))
        output12 = network1(Tensor(data2, mstype.float32))
        output22 = network2(Tensor(data2, mstype.float32))
        output32 = network3(Tensor(data2, mstype.float32))
        # Ensemble by simple mean of the six logit tensors.
        output = (output1 + output2 + output12 + output22+ output3+output32) / 6
        output = output.asnumpy()
        top1_output = np.argmax(output, (-1))
        # argsort is ascending, so the last 5 columns are the top-5 class ids.
        top5_output = np.argsort(output)[:, -5:]
        t1_correct = np.equal(top1_output, gt_classes1).sum()
        top1_correct += t1_correct
        top5_correct += get_top5_acc(top5_output, gt_classes1)
        img_tot += config.per_batch_size
        # Restart the timer after the first batch so warm-up is excluded
        # from the throughput figure.
        if config.rank == 0 and it == 0:
            t_end = time.time()
            it = 1
    if config.rank == 0:
        time_used = time.time() - t_end
        # Subtract the warm-up batch that the timer excluded.
        fps = (img_tot - config.per_batch_size) * config.group_size / time_used
        config.logger.info("Inference Performance: {:.2f} img/sec".format(fps))
    results = get_result(model1, top1_correct, top5_correct, img_tot)
    top1_correct = results[0, 0]
    top5_correct = results[1, 0]
    img_tot = results[2, 0]
    acc1 = 100.0 * top1_correct / img_tot
    acc5 = 100.0 * top5_correct / img_tot
    config.logger.info(
        "after allreduce eval: top1_correct={}, tot={},"
        "acc={:.2f}%(TOP1)".format(top1_correct, img_tot, acc1)
    )
    config.logger.info(
        "after allreduce eval: top5_correct={}, tot={},"
        "acc={:.2f}%(TOP5)".format(top5_correct, img_tot, acc5)
    )
    if config.run_distribute:
        release()
# Script entry point: run the ensemble evaluation when invoked directly.
if __name__ == "__main__":
    test()
| [
"os.mkdir",
"src.config.config.logger.info",
"src.config.config.logger.save_args",
"numpy.load",
"numpy.argmax",
"mindspore.ops.operations.Cast",
"mindspore.Tensor",
"numpy.argsort",
"src.utils.var_init.load_pretrain_model",
"src.utils.logging.get_logger",
"src.model_utils.moxing_adapter.moxing_... | [((4820, 4836), 'src.model_utils.moxing_adapter.moxing_wrapper', 'moxing_wrapper', ([], {}), '()\n', (4834, 4836), False, 'from src.model_utils.moxing_adapter import moxing_wrapper\n'), ((2158, 2201), 'src.utils.logging.get_logger', 'get_logger', (['config.outputs_dir', 'config.rank'], {}), '(config.outputs_dir, config.rank)\n', (2168, 2201), False, 'from src.utils.logging import get_logger\n'), ((2559, 2607), 'src.config.config.logger.info', 'config.logger.info', (['"""before results=%s"""', 'results'], {}), "('before results=%s', results)\n", (2577, 2607), False, 'from src.config import config\n'), ((4750, 4797), 'src.config.config.logger.info', 'config.logger.info', (['"""after results=%s"""', 'results'], {}), "('after results=%s', results)\n", (4768, 4797), False, 'from src.config import config\n'), ((4889, 4993), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'config.device_target', 'save_graphs': '(False)'}), '(mode=context.GRAPH_MODE, device_target=config.\n device_target, save_graphs=False)\n', (4908, 4993), False, 'from mindspore import Tensor, context\n'), ((5401, 5432), 'src.config.config.logger.save_args', 'config.logger.save_args', (['config'], {}), '(config)\n', (5424, 5432), False, 'from src.config import config\n'), ((5452, 5504), 'src.config.config.logger.important_info', 'config.logger.important_info', (['"""start create network"""'], {}), "('start create network')\n", (5480, 5504), False, 'from src.config import config\n'), ((6136, 6353), 'src.py_dataset.TTA_classification_dataset', 'TTA_classification_dataset', (['config.eval_data_path'], {'image_size': 'config.image_size', 'per_batch_size': 'config.per_batch_size', 'max_epoch': '(1)', 'rank': 'config.rank', 'group_size': 'config.group_size', 'mode': '"""eval1"""', 'config': 'config'}), "(config.eval_data_path, image_size=config.\n image_size, per_batch_size=config.per_batch_size, 
max_epoch=1, rank=\n config.rank, group_size=config.group_size, mode='eval1', config=config)\n", (6162, 6353), False, 'from src.py_dataset import TTA_classification_dataset\n'), ((6433, 6650), 'src.py_dataset.TTA_classification_dataset', 'TTA_classification_dataset', (['config.eval_data_path'], {'image_size': 'config.image_size', 'per_batch_size': 'config.per_batch_size', 'max_epoch': '(1)', 'rank': 'config.rank', 'group_size': 'config.group_size', 'mode': '"""eval2"""', 'config': 'config'}), "(config.eval_data_path, image_size=config.\n image_size, per_batch_size=config.per_batch_size, max_epoch=1, rank=\n config.rank, group_size=config.group_size, mode='eval2', config=config)\n", (6459, 6650), False, 'from src.py_dataset import TTA_classification_dataset\n'), ((7021, 7066), 'src.utils.var_init.load_pretrain_model', 'load_pretrain_model', (['model1', 'network1', 'config'], {}), '(model1, network1, config)\n', (7040, 7066), False, 'from src.utils.var_init import load_pretrain_model\n'), ((7071, 7116), 'src.utils.var_init.load_pretrain_model', 'load_pretrain_model', (['model2', 'network2', 'config'], {}), '(model2, network2, config)\n', (7090, 7116), False, 'from src.utils.var_init import load_pretrain_model\n'), ((7121, 7166), 'src.utils.var_init.load_pretrain_model', 'load_pretrain_model', (['model3', 'network3', 'config'], {}), '(model3, network3, config)\n', (7140, 7166), False, 'from src.utils.var_init import load_pretrain_model\n'), ((7625, 7636), 'time.time', 'time.time', ([], {}), '()\n', (7634, 7636), False, 'import time\n'), ((1561, 1569), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (1567, 1569), True, 'from mindspore.ops import operations as P\n'), ((1592, 1605), 'mindspore.ops.operations.AllReduce', 'P.AllReduce', ([], {}), '()\n', (1603, 1605), True, 'from mindspore.ops import operations as P\n'), ((1858, 1864), 'mindspore.communication.management.init', 'init', ([], {}), '()\n', (1862, 1864), False, 'from 
mindspore.communication.management import init, get_rank, get_group_size, release\n'), ((1887, 1897), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (1895, 1897), False, 'from mindspore.communication.management import init, get_rank, get_group_size, release\n'), ((1926, 1942), 'mindspore.communication.management.get_group_size', 'get_group_size', ([], {}), '()\n', (1940, 1942), False, 'from mindspore.communication.management import init, get_rank, get_group_size, release\n'), ((3045, 3084), 'numpy.save', 'np.save', (['top1_correct_npy', 'top1_correct'], {}), '(top1_correct_npy, top1_correct)\n', (3052, 3084), True, 'import numpy as np\n'), ((3093, 3132), 'numpy.save', 'np.save', (['top5_correct_npy', 'top5_correct'], {}), '(top5_correct_npy, top5_correct)\n', (3100, 3132), True, 'import numpy as np\n'), ((3141, 3170), 'numpy.save', 'np.save', (['img_tot_npy', 'img_tot'], {}), '(img_tot_npy, img_tot)\n', (3148, 3170), True, 'import numpy as np\n'), ((4681, 4698), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4689, 4698), True, 'import numpy as np\n'), ((4727, 4744), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4735, 4744), True, 'import numpy as np\n'), ((5235, 5353), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': 'parallel_mode', 'device_num': 'config.group_size', 'gradients_mean': '(True)'}), '(parallel_mode=parallel_mode, device_num=\n config.group_size, gradients_mean=True)\n', (5268, 5353), False, 'from mindspore import Tensor, context\n'), ((5917, 5972), 'src.models.build_network', 'build_network', (['"""resnet50_bam_wider"""', 'config.num_classes'], {}), "('resnet50_bam_wider', config.num_classes)\n", (5930, 5972), False, 'from src.models import build_network\n'), ((5982, 6044), 'src.models.build_network', 'build_network', (['"""inception_resnet_v2_wider"""', 'config.num_classes'], {}), "('inception_resnet_v2_wider', 
config.num_classes)\n", (5995, 6044), False, 'from src.models import build_network\n'), ((6054, 6109), 'src.models.build_network', 'build_network', (['"""se_resnext50_wider"""', 'config.num_classes'], {}), "('se_resnext50_wider', config.num_classes)\n", (6067, 6109), False, 'from src.models import build_network\n'), ((7412, 7442), 'src.utils.auto_mixed_precision.auto_mixed_precision', 'auto_mixed_precision', (['network1'], {}), '(network1)\n', (7432, 7442), False, 'from src.utils.auto_mixed_precision import auto_mixed_precision\n'), ((7451, 7481), 'src.utils.auto_mixed_precision.auto_mixed_precision', 'auto_mixed_precision', (['network2'], {}), '(network2)\n', (7471, 7481), False, 'from src.utils.auto_mixed_precision import auto_mixed_precision\n'), ((7490, 7520), 'src.utils.auto_mixed_precision.auto_mixed_precision', 'auto_mixed_precision', (['network3'], {}), '(network3)\n', (7510, 7520), False, 'from src.utils.auto_mixed_precision import auto_mixed_precision\n'), ((8250, 8271), 'numpy.argmax', 'np.argmax', (['output', '(-1)'], {}), '(output, -1)\n', (8259, 8271), True, 'import numpy as np\n'), ((9435, 9444), 'mindspore.communication.management.release', 'release', ([], {}), '()\n', (9442, 9444), False, 'from mindspore.communication.management import init, get_rank, get_group_size, release\n'), ((1659, 1681), 'mindspore.ops.functional.scalar_to_array', 'F.scalar_to_array', (['(1.0)'], {}), '(1.0)\n', (1676, 1681), True, 'from mindspore.ops import functional as F\n'), ((2723, 2746), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (2737, 2746), False, 'import os\n'), ((2760, 2777), 'os.mkdir', 'os.mkdir', (['tmp_dir'], {}), '(tmp_dir)\n', (2768, 2777), False, 'import os\n'), ((4457, 4482), 'numpy.load', 'np.load', (['top1_correct_npy'], {}), '(top1_correct_npy)\n', (4464, 4482), True, 'import numpy as np\n'), ((4515, 4540), 'numpy.load', 'np.load', (['top5_correct_npy'], {}), '(top5_correct_npy)\n', (4522, 4540), True, 'import numpy as np\n'), 
((4568, 4588), 'numpy.load', 'np.load', (['img_tot_npy'], {}), '(img_tot_npy)\n', (4575, 4588), True, 'import numpy as np\n'), ((5010, 5043), 'os.getenv', 'os.getenv', (['"""DEVICE_ID"""', '"""not_set"""'], {}), "('DEVICE_ID', 'not_set')\n", (5019, 5043), False, 'import os\n'), ((7785, 7814), 'mindspore.Tensor', 'Tensor', (['data1', 'mstype.float32'], {}), '(data1, mstype.float32)\n', (7791, 7814), False, 'from mindspore import Tensor, context\n'), ((7843, 7872), 'mindspore.Tensor', 'Tensor', (['data1', 'mstype.float32'], {}), '(data1, mstype.float32)\n', (7849, 7872), False, 'from mindspore import Tensor, context\n'), ((7901, 7930), 'mindspore.Tensor', 'Tensor', (['data1', 'mstype.float32'], {}), '(data1, mstype.float32)\n', (7907, 7930), False, 'from mindspore import Tensor, context\n'), ((7961, 7990), 'mindspore.Tensor', 'Tensor', (['data2', 'mstype.float32'], {}), '(data2, mstype.float32)\n', (7967, 7990), False, 'from mindspore import Tensor, context\n'), ((8020, 8049), 'mindspore.Tensor', 'Tensor', (['data2', 'mstype.float32'], {}), '(data2, mstype.float32)\n', (8026, 8049), False, 'from mindspore import Tensor, context\n'), ((8079, 8108), 'mindspore.Tensor', 'Tensor', (['data2', 'mstype.float32'], {}), '(data2, mstype.float32)\n', (8085, 8108), False, 'from mindspore import Tensor, context\n'), ((8296, 8314), 'numpy.argsort', 'np.argsort', (['output'], {}), '(output)\n', (8306, 8314), True, 'import numpy as np\n'), ((8587, 8598), 'time.time', 'time.time', ([], {}), '()\n', (8596, 8598), False, 'import time\n'), ((8664, 8675), 'time.time', 'time.time', ([], {}), '()\n', (8673, 8675), False, 'import time\n'), ((2072, 2095), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2093, 2095), False, 'import datetime\n'), ((8345, 8379), 'numpy.equal', 'np.equal', (['top1_output', 'gt_classes1'], {}), '(top1_output, gt_classes1)\n', (8353, 8379), True, 'import numpy as np\n'), ((5097, 5119), 'os.getenv', 'os.getenv', (['"""DEVICE_ID"""'], {}), 
"('DEVICE_ID')\n", (5106, 5119), False, 'import os\n'), ((3713, 3745), 'os.path.exists', 'os.path.exists', (['top1_correct_npy'], {}), '(top1_correct_npy)\n', (3727, 3745), False, 'import os\n'), ((3773, 3805), 'os.path.exists', 'os.path.exists', (['top5_correct_npy'], {}), '(top5_correct_npy)\n', (3787, 3805), False, 'import os\n'), ((3833, 3860), 'os.path.exists', 'os.path.exists', (['img_tot_npy'], {}), '(img_tot_npy)\n', (3847, 3860), False, 'import os\n')] |
import tensorflow as tf
import os
import glob
import vng_model as md
import numpy as np
import csv
# Command-line flags for the evaluation script (TF1 `tf.app.flags` API).
FLAGS = tf.app.flags.FLAGS
# Directory containing the trained checkpoint to restore in eval_model().
tf.app.flags.DEFINE_string('checkpoint_dir', '',
                           """Direction where the trained weights of model is save""")
# Folder holding the *.jpeg images that make up the evaluation data-set.
tf.app.flags.DEFINE_string('eval_data_path', '',
                           """The direction of folder that contains evaluation data-set""")
# Number of images fed to the network per evaluation step.
tf.app.flags.DEFINE_integer('batch_size', 100,
                            """The size of a data batch""")
# CSV file where (filename, predicted label) rows are written.
tf.app.flags.DEFINE_string('output_path', '',
                           """The direction of folder that contains the output of model""")
def read_data(filename_queue, height, width, channels):
    """Read one JPEG from *filename_queue* and resize it bilinearly.

    Args:
        filename_queue: TF string queue of image file paths.
        height: Target image height after resizing.
        width: Target image width after resizing.
        channels: Number of colour channels to decode.

    Returns:
        A ``(image, filename)`` tuple: the resized image tensor of shape
        ``[height, width, channels]`` and the tensor holding the file name
        that was read.
    """
    file_reader = tf.WholeFileReader()
    fname, raw_bytes = file_reader.read(filename_queue)
    decoded = tf.image.decode_jpeg(raw_bytes, channels=channels)
    # resize_bilinear expects a 4-D batch, so add a batch axis, resize,
    # then drop the batch axis again.
    batched = tf.expand_dims(decoded, 0)
    resized = tf.image.resize_bilinear(batched, [height, width])
    img = tf.squeeze(resized, [0])
    return img, fname
def eval_model():
    """Evaluate the trained VNG model on a folder of JPEG images.

    Reads every ``*.jpeg`` under ``FLAGS.eval_data_path``, restores the
    latest checkpoint from ``FLAGS.checkpoint_dir``, predicts a class label
    for each image and writes ``(filename, label)`` rows as CSV to
    ``FLAGS.output_path``.

    Returns:
        None. Predictions are written to ``FLAGS.output_path`` as a side
        effect; status messages go to stdout.
    """
    eval_data_path = list(glob.iglob(os.path.join(FLAGS.eval_data_path,
                                                '*.jpeg')))
    with tf.Graph().as_default():
        filename_queue = tf.train.string_input_producer(eval_data_path)
        sample, filename = read_data(filename_queue, 224, 224, 3)
        batch_samples, batch_filename = tf.train.batch(
            [sample, filename],
            batch_size=FLAGS.batch_size,
            capacity=FLAGS.batch_size,
            name='input_test_data')
        # Build the VNG model (inference graph only, no training ops).
        x = tf.placeholder(tf.float32, (None, 224, 224, 3),
                           name='input_features')
        logit = md.inference_resnet(x, False)
        hat_y = tf.arg_max(logit, 1, name='predict_label')
        # Locate the trained weights and build a saver over all variables.
        ckpt_model = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        saver_model = tf.train.Saver(
            var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
        coord = tf.train.Coordinator()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            if ckpt_model and ckpt_model.model_checkpoint_path:
                saver_model.restore(sess, ckpt_model.model_checkpoint_path)
                print('Load trained weights successfully!')
            else:
                print('No checkpoint found!')
            # BUG FIX: "/" is float division in Python 3 and range() would
            # raise TypeError; use floor division. As in the original, a
            # final partial batch is dropped.
            num_iter = len(eval_data_path) // FLAGS.batch_size
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            # BUG FIX: csv.writer needs a text-mode file with newline='' in
            # Python 3; "wb" would raise TypeError on writerows().
            with open(FLAGS.output_path, "w", newline='') as f:
                writer = csv.writer(f)
                for _ in range(num_iter):
                    batch_images, batch_name = sess.run(
                        [batch_samples, batch_filename])
                    predicted_lb = sess.run(hat_y,
                                            feed_dict={x: batch_images})
                    result_model = np.column_stack(
                        (np.array(batch_name), np.array(predicted_lb)))
                    writer.writerows(result_model)
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=5)
            sess.close()
| [
"tensorflow.train.Coordinator",
"tensorflow.get_collection",
"tensorflow.train.batch",
"tensorflow.app.flags.DEFINE_integer",
"os.path.join",
"tensorflow.train.start_queue_runners",
"tensorflow.placeholder",
"tensorflow.squeeze",
"tensorflow.train.string_input_producer",
"tensorflow.train.get_chec... | [((128, 236), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_dir"""', '""""""', '"""Direction where the trained weights of model is save"""'], {}), "('checkpoint_dir', '',\n 'Direction where the trained weights of model is save')\n", (154, 236), True, 'import tensorflow as tf\n'), ((265, 378), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""eval_data_path"""', '""""""', '"""The direction of folder that contains evaluation data-set"""'], {}), "('eval_data_path', '',\n 'The direction of folder that contains evaluation data-set')\n", (291, 378), True, 'import tensorflow as tf\n'), ((407, 481), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(100)', '"""The size of a data batch"""'], {}), "('batch_size', 100, 'The size of a data batch')\n", (434, 481), True, 'import tensorflow as tf\n'), ((515, 625), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""output_path"""', '""""""', '"""The direction of folder that contains the output of model"""'], {}), "('output_path', '',\n 'The direction of folder that contains the output of model')\n", (541, 625), True, 'import tensorflow as tf\n'), ((724, 744), 'tensorflow.WholeFileReader', 'tf.WholeFileReader', ([], {}), '()\n', (742, 744), True, 'import tensorflow as tf\n'), ((816, 862), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['value'], {'channels': 'channels'}), '(value, channels=channels)\n', (836, 862), True, 'import tensorflow as tf\n'), ((883, 914), 'tensorflow.expand_dims', 'tf.expand_dims', (['image_sample', '(0)'], {}), '(image_sample, 0)\n', (897, 914), True, 'import tensorflow as tf\n'), ((934, 989), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image_sample', '[height, width]'], {}), '(image_sample, [height, width])\n', (958, 989), True, 'import tensorflow as tf\n'), ((1009, 1038), 'tensorflow.squeeze', 'tf.squeeze', 
(['image_sample', '[0]'], {}), '(image_sample, [0])\n', (1019, 1038), True, 'import tensorflow as tf\n'), ((1211, 1255), 'os.path.join', 'os.path.join', (['FLAGS.eval_data_path', '"""*.jpeg"""'], {}), "(FLAGS.eval_data_path, '*.jpeg')\n", (1223, 1255), False, 'import os\n'), ((1360, 1406), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['eval_data_path'], {}), '(eval_data_path)\n', (1390, 1406), True, 'import tensorflow as tf\n'), ((1515, 1634), 'tensorflow.train.batch', 'tf.train.batch', (['[sample, filename]'], {'batch_size': 'FLAGS.batch_size', 'capacity': 'FLAGS.batch_size', 'name': '"""input_test_data"""'}), "([sample, filename], batch_size=FLAGS.batch_size, capacity=\n FLAGS.batch_size, name='input_test_data')\n", (1529, 1634), True, 'import tensorflow as tf\n'), ((1838, 1908), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 224, 224, 3)'], {'name': '"""input_features"""'}), "(tf.float32, (None, 224, 224, 3), name='input_features')\n", (1852, 1908), True, 'import tensorflow as tf\n'), ((1922, 1970), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None,)'], {'name': '"""labels"""'}), "(tf.int32, (None,), name='labels')\n", (1936, 1970), True, 'import tensorflow as tf\n'), ((1988, 2017), 'vng_model.inference_resnet', 'md.inference_resnet', (['x', '(False)'], {}), '(x, False)\n', (2007, 2017), True, 'import vng_model as md\n'), ((2035, 2077), 'tensorflow.arg_max', 'tf.arg_max', (['logit', '(1)'], {'name': '"""predict_label"""'}), "(logit, 1, name='predict_label')\n", (2045, 2077), True, 'import tensorflow as tf\n'), ((2142, 2193), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (2171, 2193), True, 'import tensorflow as tf\n'), ((2322, 2344), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (2342, 2344), True, 'import tensorflow as tf\n'), ((2358, 2370), 'tensorflow.Session', 'tf.Session', 
([], {}), '()\n', (2368, 2370), True, 'import tensorflow as tf\n'), ((2788, 2840), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess', 'coord': 'coord'}), '(sess=sess, coord=coord)\n', (2816, 2840), True, 'import tensorflow as tf\n'), ((1304, 1314), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1312, 1314), True, 'import tensorflow as tf\n'), ((2241, 2289), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (2258, 2289), True, 'import tensorflow as tf\n'), ((2401, 2434), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2432, 2434), True, 'import tensorflow as tf\n'), ((2920, 2933), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2930, 2933), False, 'import csv\n'), ((3199, 3219), 'numpy.array', 'np.array', (['batch_name'], {}), '(batch_name)\n', (3207, 3219), True, 'import numpy as np\n'), ((3273, 3295), 'numpy.array', 'np.array', (['predicted_lb'], {}), '(predicted_lb)\n', (3281, 3295), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np # scientific computing lib
import sounddevice as sd # sounddevice / hostapi handling
import sys # used for printing err to std stream
import matplotlib.pyplot as plt # plots and graphs
import os # functions for operatingsystem
import os.path # functions for operatingsystem
from PyQt5 import QtWidgets # GUI framework
from PyQt5.QtCore import Qt, QTimer, pyqtSlot
from PyQt5.QtWidgets import (QApplication, QComboBox, QDialog,
QGridLayout, QTabWidget, QProgressBar,
QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QPushButton, QWidget, QFormLayout,
QFileDialog, QSizePolicy)
import pyqtgraph as pg # scientific plots
from pyqtgraph.Qt import QtGui, QtCore # GUI framework
import pyaudio # cross-platform audio I/O
import struct # interpret bytes as packed binary data
from scipy.fftpack import fft # Fast-Fourier-Transform
import scipy.io.wavfile as wavfile # read .wav-files
import haiopy as haiopy # package for audio rec, play, monitor
# ----------------------------------------------
# Enable High-DPI scaling so widget geometry renders crisply; applied at
# import time, before QApplication(sys.argv) is created in __main__.
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
# Use high-resolution pixmaps for icons on High-DPI screens.
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
# ----------------------------------------------
class WidgetGallery(QDialog):
    """Main application window of the audio tool.

    Hosts three areas: a "Record" group box (record from the default input
    via haiopy), a tabbed "Play"/"PlayRecord" widget for wav playback and
    simultaneous play+record, and a shared progress bar driven by a QTimer.
    """
    def __init__(self, parent=None):
        """Build all sub-widgets and lay out the fixed-size main window."""
        super(WidgetGallery, self).__init__(parent)
        self.originalPalette = QApplication.palette()
        styleLabel = QLabel("Gruppe 6 - Audio Devices:")
        settingButton = QPushButton('Settings')
        settingButton.clicked.connect(settingButton_on_click)
        # These three calls create self.RecordFunction, self.playMusicFuntion
        # and self.progressBar used in the layout below.
        self.RecordBox()
        self.playMusicBox()
        self.createProgressBar()
        # self.SpectrumAnalyzer()
        topLayout = QHBoxLayout()
        topLayout.addWidget(styleLabel)
        topLayout.addStretch(50)
        topLayout.addWidget(settingButton)
        mainLayout = QGridLayout()
        mainLayout.setSpacing(20)
        mainLayout.addLayout(topLayout, 0, 0, 1, 2)
        mainLayout.addWidget(self.RecordFunction, 1, 0)
        mainLayout.addWidget(self.playMusicFuntion, 1, 1, 1, 1)
        mainLayout.addWidget(self.progressBar, 2, 0, 1, 2)
        mainLayout.setRowStretch(3, 1)
        # mainLayout.addWidget(self.PlotBox, 4, 0, 1, 4)
        self.setLayout(mainLayout)
        self.setWindowTitle("GUI")
        self.setFixedSize(400, 280)
        QApplication.setStyle('Fusion')
    def RecordBox(self):
        """Create the "Record" group box: duration field plus
        Record/Stop/Plot buttons (stored as self.RecordFunction)."""
        self.RecordFunction = QGroupBox("Record")
        self.RecordFunction.setCheckable(True)
        self.RecordFunction.setChecked(True)
        topLayout = QFormLayout()
        self.duration_input = QLineEdit('')
        self.duration_input.setFixedWidth(80)
        topLayout.addRow("Duration/s:", self.duration_input)
        self.RecordButton = QPushButton("Record")
        self.RecordButton.setCheckable(True)
        self.RecordButton.setChecked(False)
        self.RecordButton.clicked.connect(self.RecordButtonClicked)
        # self.RecordButton.clicked.connect(recordButton_on_clicked)
        self.RecordButton.setStyleSheet("background-color : normal")
        self.StopButton = QPushButton("Stop")
        self.StopButton.setDefault(True)
        self.StopButton.clicked.connect(self.StopButtonClicked)
        PlotButton = QPushButton('Plot')
        PlotButton.clicked.connect(PlotButton_on_clicked)
        layout = QGridLayout()
        # layout.setSpacing(5)
        layout.addLayout(topLayout, 0, 0, 1, 2)
        layout.addWidget(self.RecordButton, 1, 0)
        layout.addWidget(self.StopButton, 1, 1)
        layout.addWidget(PlotButton, 2, 0, 1, 2)
        # layout.addStretch(1)
        self.RecordFunction.setLayout(layout)
    def RecordButtonClicked(self):
        """Start recording for the entered duration and drive the
        progress bar once per second via a QTimer.

        NOTE(review): int(self.duration) raises ValueError when the
        duration field is empty or non-numeric — no validation here.
        """
        if self.RecordButton.isChecked():
            self.RecordButton.setStyleSheet("background-color: red")
        else:
            self.RecordButton.setStyleSheet("background-color: normal")
        self.duration = self.duration_input.text()
        self.duration = int(self.duration)
        self.duration_input.setEnabled(False)
        self.progressBar.setRange(0, self.duration)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.advanceProgressBar)
        self.timer.start(1000)
        # device_in=0 is hard-coded here rather than using sd.default.device.
        self.recordsi = haiopy.Record('wav', device_in=0)
        self.recordedsignal = self.recordsi.record(duration=self.duration)
    def StopButtonClicked(self):
        """Abort a running recording and reset button/timer/progress state."""
        self.RecordButton.setStyleSheet("background-color: normal")
        self.RecordButton.setChecked(False)
        self.duration_input.setEnabled(True)
        self.timer.stop()
        self.recordsi.on_stop()
        self.progressBar.setValue(0)
        self.curVal = 0
    # def getDuration(self):
    #     print(self.duration_input.text())
    #     self.duration_input.setEnabled(False)
    def playMusicBox(self):
        """Create the tabbed playback widget (self.playMusicFuntion):
        tab "Play" (open/play/stop/plot a wav) and tab "PlayRecord"
        (play a wav while recording)."""
        self.playMusicFuntion = QTabWidget()
        self.playMusicFuntion.setSizePolicy(QSizePolicy.Preferred,
                                            QSizePolicy.Ignored)
        tab1 = QWidget()
        self.OpenWav = QPushButton('Open')
        self.OpenWav.clicked.connect(self.openFileNamesDialog)
        self.OpenWav.setFixedWidth(50)
        topLayout = QFormLayout()
        self.show_openedFile = QLineEdit('file path')
        self.show_openedFile.setEnabled(False)
        topLayout.addRow('File Name:', self.show_openedFile)
        self.Play_PlayButton = QPushButton("Play")
        self.Play_PlayButton.setCheckable(True)
        self.Play_PlayButton.setChecked(False)
        self.Play_PlayButton.clicked.connect(self.Play_PlayButtonClicked)
        self.Play_PlayButton.setStyleSheet("background-color : normal")
        self.Play_StopButton = QPushButton("Stop")
        self.Play_StopButton.setDefault(True)
        self.Play_StopButton.clicked.connect(self.Play_StopButtonClicked)
        self.Play_PlotButton = QPushButton('Plot')
        self.Play_PlotButton.clicked.connect(self.plot_wavfile)
        layout = QGridLayout()
        layout.addWidget(self.OpenWav, 0, 0)
        layout.addLayout(topLayout, 1, 0, 1, 2)
        layout.addWidget(self.Play_PlayButton, 2, 0)
        layout.addWidget(self.Play_StopButton, 2, 1)
        layout.addWidget(self.Play_PlotButton, 3, 0, 1, 2)
        layout.setContentsMargins(5, 5, 5, 5)
        tab1.setLayout(layout)
        tab2 = QWidget()
        self.playrec_OpenWav = QPushButton('Open')
        self.playrec_OpenWav.clicked.connect(self.openFileNamesDialog2)
        self.playrec_OpenWav.setFixedWidth(50)
        topLayout_playrec = QFormLayout()
        self.playrec_show_openedFile = QLineEdit('file path')
        self.playrec_show_openedFile.setEnabled(False)
        # self.show_openFile.setFixedWidth(100)
        topLayout_playrec.addRow('File Name:', self.playrec_show_openedFile)
        self.recplay_RecordButton = QPushButton("Record")
        self.recplay_RecordButton.setCheckable(True)
        self.recplay_RecordButton.setChecked(False)
        self.recplay_RecordButton.clicked.connect(self.
                                                  recplay_RecordButtonClicked)
        self.recplay_RecordButton.setStyleSheet("background-color : normal")
        self.playrec_StopButton = QPushButton("Stop")
        self.playrec_StopButton.setDefault(True)
        self.playrec_StopButton.clicked.connect(self.playrec_StopButtonClicked)
        layout_playrec = QGridLayout()
        layout_playrec.addWidget(self.playrec_OpenWav, 0, 0)
        layout_playrec.addLayout(topLayout_playrec, 1, 0, 1, 2)
        layout_playrec.addWidget(self.recplay_RecordButton, 2, 0)
        layout_playrec.addWidget(self.playrec_StopButton, 2, 1)
        # layout.setContentsMargins(5, 5, 5, 5)
        # layout_playrec.setContentsMargins(5, 5, 5, 5)
        tab2.setLayout(layout_playrec)
        self.playMusicFuntion.addTab(tab1, "Play")
        self.playMusicFuntion.addTab(tab2, "PlayRecord")
    def openFileNamesDialog(self):
        """Open a file-chooser for the "Play" tab and show the picked
        file's base name in self.show_openedFile.

        NOTE(review): the backslash continuation inside the caption and
        filter strings embeds the source indentation into those strings.
        self.filesName is derived by splitting str(self.files) on '/' and
        stripping the trailing "']" — it holds only the base name.
        """
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        self.files, _ = QFileDialog.getOpenFileNames(self,
                                                     "QFileDialog.\
                                                     getOpenFileNames()",
                                                     "",
                                                     "All Files (*);;\
                                                     Python Files (*.py)",
                                                     options=options)
        if self.files:
            self.filesName = str(self.files).split('/')
            self.filesName = self.filesName[-1][:-2]
            self.show_openedFile.setText(self.filesName)
        # print(self.files)
    def openFileNamesDialog2(self):
        """Same as openFileNamesDialog, but updates the "PlayRecord"
        tab's file-name field (self.playrec_show_openedFile)."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        self.files, _ = QFileDialog.getOpenFileNames(self,
                                                     "QFileDialog.\
                                                     getOpenFileNames()",
                                                     "",
                                                     "All Files (*);;\
                                                     Python Files (*.py)",
                                                     options=options)
        if self.files:
            self.filesName = str(self.files).split('/')
            self.filesName = self.filesName[-1][:-2]
            self.playrec_show_openedFile.setText(self.filesName)
    def recplay_RecordButtonClicked(self):
        """Play the selected wav while recording, if a .wav was chosen;
        otherwise show a PopupWindow warning.

        NOTE(review): the "background-\\ ... color: normal" stylesheet
        strings below are continued with a backslash inside the literal,
        so the indentation spaces become part of the stylesheet string.
        """
        if self.playrec_show_openedFile.text() != 'file path':
            self.suffix = self.filesName.split('.')
            if self.filesName and self.suffix[-1] == 'wav':
                self.recplay_RecordButton.setStyleSheet("background-color:red")
                fi = str(self.files[0])
                self.playrectest = haiopy.PlayRecord(audio_in='wav',
                                                    audio_out=fi,
                                                    device_in=sd.default.device[0],
                                                    device_out=sd.default.device[1],
                                                    sampling_rate=44100)
                self.playrectest.playrec()
                self.progressBar.setRange(0, self.playrectest.duration)
                self.timer = QTimer(self)
                self.timer.timeout.connect(self.advanceProgressBar)
                self.timer.start(1000)
            else:
                self.recplay_RecordButton.setStyleSheet("background-\
                                                        color: normal")
                popup = PopupWindow(self)
                popup.setGeometry(400, 400, 500, 100)
                popup.show()
                self.recplay_RecordButton.setChecked(False)
        else:
            self.recplay_RecordButton.setStyleSheet("background-\
                                                    color: normal")
            popup = PopupWindow(self)
            popup.setGeometry(400, 400, 500, 100)
            popup.show()
            self.recplay_RecordButton.setChecked(False)
    def Play_PlayButtonClicked(self):
        """Play the selected wav file and animate the progress bar;
        warn via PopupWindow if no valid .wav was chosen.

        NOTE(review): haiopy.Play receives self.filesName (base name
        only, not the full path) — presumably resolved relative to the
        working directory; verify against haiopy.
        """
        if self.show_openedFile.text() != 'file path':
            self.suffix = self.filesName.split('.')
            if self.filesName and self.suffix[-1] == 'wav':
                self.playsi = haiopy.Play(self.filesName,
                                        device_out=sd.default.device[1])
                self.playsi.play()
                self.progressBar.setRange(0, self.playsi.duration)
                self.timer = QTimer(self)
                self.timer.timeout.connect(self.advanceProgressBar)
                self.timer.start(1000)
            else:
                popup = PopupWindow(self)
                popup.setGeometry(400, 400, 500, 100)
                popup.show()
                self.Play_PlayButton.setChecked(False)
        else:
            popup = PopupWindow(self)
            popup.setGeometry(400, 400, 500, 100)
            popup.show()
            self.Play_PlayButton.setChecked(False)
    def Play_StopButtonClicked(self):
        """Abort playback and reset the Play button, timer and progress."""
        self.timer.stop()
        self.Play_PlayButton.setStyleSheet("background-color: normal")
        self.Play_PlayButton.setChecked(False)
        self.playsi.output_stream.abort()
        self.progressBar.setValue(0)
        self.curVal = 0
    def playrec_StopButtonClicked(self):
        """Abort a running play+record and reset the associated state."""
        self.timer.stop()
        self.recplay_RecordButton.setStyleSheet("background-color: normal")
        self.recplay_RecordButton.setChecked(False)
        self.playrectest.playrec_stream.abort()
        self.progressBar.setValue(0)
        self.curVal = 0
    def plot_wavfile(self):
        """Plot the selected wav file's waveform with matplotlib.

        NOTE(review): audioBuffer.shape[1] assumes a 2-D (stereo) buffer;
        a mono file would raise IndexError — TODO confirm with test data.
        The path is built as $HOME/audio/<full selected path>, which only
        resolves if the chosen file lives under that tree — verify.
        """
        if self.show_openedFile.text() != 'file path':
            self.suffix = self.filesName.split('.')
            if self.filesName and self.suffix[-1] == 'wav':
                print(self.files[0])
                myAudioFilename = str(self.files[0])
                # homedir -> audiodir -> my wav files
                dataset_path = os.path.join(os.environ['HOME'], 'audio')
                wavedata = os.path.join(dataset_path, myAudioFilename)
                sampleRate, audioBuffer = wavfile.read(wavedata)
                duration = len(audioBuffer)/sampleRate
                # time vector
                time = np.arange(0, duration, 1/sampleRate)
                if audioBuffer.shape[1] == 2:
                    # Stereo: plot only the first (left) channel.
                    newaudioBuffer = []
                    for i in range(len(audioBuffer)):
                        newaudioBuffer.append(audioBuffer[i][0])
                    newaudioBuffer = np.array(newaudioBuffer)
                    time = time[:len(newaudioBuffer)]
                    plt.plot(time, newaudioBuffer)
                else:
                    time = time[:len(audioBuffer)]
                    plt.plot(time, audioBuffer)
                plt.xlabel('Time [s]')
                plt.ylabel('Amplitude')
                plt.title(myAudioFilename)
                plt.show()
            else:
                popup = PopupWindow(self)
                popup.setGeometry(400, 400, 500, 100)
                popup.show()
                self.Play_PlayButton.setChecked(False)
        else:
            popup = PopupWindow(self)
            popup.setGeometry(400, 400, 500, 100)
            popup.show()
            self.Play_PlayButton.setChecked(False)
    def createProgressBar(self):
        """Create the shared progress bar (range is reset per operation)."""
        self.progressBar = QProgressBar()
        self.progressBar.setRange(0, 10000)
        self.progressBar.setValue(0)
    def advanceProgressBar(self):
        """QTimer slot: advance the bar once per tick and, when the
        maximum is reached, reset every record/play button to idle."""
        self.curVal = self.progressBar.value()
        maxVal = self.progressBar.maximum()
        # print(maxVal)
        self.progressBar.setValue(self.curVal + 1)
        if self.curVal >= maxVal:
            self.timer.stop()
            self.progressBar.setValue(0)
            self.curVal = 0
        if self.curVal == 0:
            self.RecordButton.setStyleSheet("background-color: normal")
            # NOTE(review): the continued literal below embeds the source
            # indentation into the stylesheet string.
            self.recplay_RecordButton.setStyleSheet("background-\
                                                    color:normal")
            self.recplay_RecordButton.setChecked(False)
            self.Play_PlayButton.setChecked(False)
            self.RecordButton.setChecked(False)
            self.duration_input.setEnabled(True)
class PopupWindow(QDialog):
    """Warning dialog shown when no valid ``.wav`` file was selected.

    Args:
        parent: Optional parent widget; callers pass the main window.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        # BUG FIX: the original literal used a backslash line continuation
        # inside the string, which embedded the source indentation (a long
        # run of spaces) into the displayed message. Use implicit string
        # concatenation instead and fix the grammar.
        self.label = QLabel('No file has been chosen or it is not a wav '
                            'file\n\nPlease choose a wav file', self)
        # self.label.setFont.("font-weight: bold")
        self.setWindowTitle('WARNING')
class realtime_Plot(QDialog):
    """Live waveform and log-log spectrum display of the microphone input.

    Opens a pyaudio input stream and two pyqtgraph plot widgets; update()
    is driven by a 20 ms QTimer started from animation().
    """
    def __init__(self):
        """Build both plot widgets, their axes/tick labels, and open the
        pyaudio stream (44.1 kHz, mono, 2048-sample chunks)."""
        super(realtime_Plot, self).__init__()
        self.setWindowTitle('Real Time Input Plot')
        self.resize(800, 500)
        # self.setWindowModality(Qt.ApplicationModal)
        self.traces = dict()
        self.color = (224, 223, 227)
        self.win_1 = pg.GraphicsLayoutWidget()
        self.win_1.setBackground(self.color)
        self.win_2 = pg.GraphicsLayoutWidget()
        self.win_2.setBackground(self.color)
        self.wf_xlabels = [(0, '0'), (2048, '2048'), (4096, '4096')]
        self.wf_xaxis = pg.AxisItem(orientation='bottom')
        self.wf_xaxis.setTicks([self.wf_xlabels])
        self.wf_ylabels = [(0, '0'), (-1, '-1'), (1, '1')]
        self.wf_yaxis = pg.AxisItem(orientation='left')
        self.wf_yaxis.setTicks([self.wf_ylabels])
        # Spectrum x axis is log10(frequency); ticks mark common frequencies.
        self.sp_xlabels = [
            (np.log10(20), '20'), (np.log10(50), '50'), (np.log10(100), '100'),
            (np.log10(250), '250'), (np.log10(500), '500'),
            (np.log10(1000), '1k'), (np.log10(4000), '4k'),
            (np.log10(8000), '8k'), (np.log10(20000), '20k')]
        self.sp_xaxis = pg.AxisItem(orientation='bottom')
        self.sp_xaxis.setTicks([self.sp_xlabels])
        self.sp_ylabels = [(0, '0'), (-1, '-12'), (-2, '-24'), (-3, '-48')]
        self.sp_yaxis = pg.AxisItem(orientation='left')
        self.sp_yaxis.setTicks([self.sp_ylabels])
        self.waveform = self.win_1.addPlot(
            title='Waveform', row=1, col=1, axisItems={'bottom': self.wf_xaxis,
                                                       'left': self.wf_yaxis},)
        self.spectrum = self.win_2.addPlot(
            title='Spectrum', row=2, col=1, axisItems={'bottom': self.sp_xaxis,
                                                       'left': self.sp_yaxis},)
        self.waveform.showGrid(x=True, y=True, alpha=0.3)
        self.waveform.setMouseEnabled(x=False, y=False)
        self.spectrum.showGrid(x=True, y=True, alpha=0.3)
        self.spectrum.setMouseEnabled(x=False, y=False)
        layout = QHBoxLayout()
        layout.addWidget(self.win_1)
        layout.addWidget(self.win_2)
        self.setLayout(layout)
        # ------------------------------- just for test ------------------------
        # pyaudio should be deleted
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 44100
        self.CHUNK = 1024 * 2
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            output=True,
            frames_per_buffer=self.CHUNK,
        )
        # waveform and spectrum x points
        self.x = np.arange(0, 2 * self.CHUNK, 2)
        self.f = np.linspace(0, 20000, 1024)
    def start(self):
        """Enter the Qt event loop when not running interactively."""
        if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
            QtGui.QApplication.instance().exec_()
    def set_plotdata(self, name, data_x, data_y):
        """Update the named trace, creating it with fixed axis ranges
        ('waveform' linear, 'spectrum' log-log) on first use."""
        if name in self.traces:
            self.traces[name].setData(data_x, data_y)
        else:
            if name == 'waveform':
                self.traces[name] = self.waveform.plot(pen='b', width=4)
                self.waveform.setYRange(-1, 1, padding=0.05)
                self.waveform.setXRange(0, 2 * self.CHUNK, padding=0.05)
            if name == 'spectrum':
                self.traces[name] = self.spectrum.plot(pen='m', width=4)
                self.spectrum.setLogMode(x=True, y=True)
                self.spectrum.setYRange(-4, 0, padding=0.05)
                self.spectrum.setXRange(
                    np.log10(20), np.log10(self.RATE / 2), padding=0.05)
    def update(self):
        """Timer slot: read one chunk from the stream, rescale samples to
        [-1, 1] for the waveform plot and compute a normalized FFT
        magnitude for the spectrum plot.

        NOTE(review): the stream is opened as paInt16 but the bytes are
        unpacked as unsigned 8-bit values and every second byte taken —
        presumably an intentional 8-bit approximation; confirm.
        """
        self.wf_data = self.stream.read(self.CHUNK)
        self.wf_data = struct.unpack(str(2 * self.CHUNK) + 'B', self.wf_data)
        self.wf_data = np.array(self.wf_data, dtype='b')[::2] + 128
        self.wf_data_ip = np.interp(self.wf_data,
                                   (self.wf_data.min(),
                                    self.wf_data.max()),
                                   (-1, +1))
        self.set_plotdata(name='waveform',
                          data_x=self.x,
                          data_y=self.wf_data_ip,)
        self.sp_data = fft(np.array(self.wf_data, dtype='int8') - 128)
        self.sp_data = np.abs(self.sp_data[0:int(self.CHUNK / 2)]) * 2 / \
            (128 * self.CHUNK)
        self.set_plotdata(name='spectrum', data_x=self.f, data_y=self.sp_data)
# -----------------------------------------------------------------------------
    def animation(self):
        """Start the 20 ms update timer and run the dialog modally."""
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(20)
        self.start()
        # self.setLayout(self.layout)
        self.exec_()
class settings(QDialog):
    """Dialog window for choosing sound device.

    Populates combo boxes from sounddevice's host APIs and devices and
    writes the user's choice into the global ``sd.default.device`` pair
    (index 0 = input, index 1 = output). The dialog runs modally from
    __init__ via exec_().
    """
    def __init__(self):
        """Build the host/input/output combo boxes and show the dialog."""
        super(settings, self).__init__()
        self.setWindowTitle('Settings')
        # self.b1 = QPushButton("ok", self)
        # self.b1.move(50, 50)
        self.resize(200, 200)
        # self.move(650, 450)
        self.setWindowModality(Qt.ApplicationModal)
        self.layout = QGridLayout()
        # Create central Widget
        self.centralWidget = QWidget(self)
        # Create combobox and add available Host APIs
        self.host_label = QLabel('Host APis:')
        self.host = QComboBox(self.centralWidget)
        self.host_label.setBuddy(self.host)
        self.host.setToolTip('This are the HOST APIs:')
        for hostapi in sd.query_hostapis():
            self.host.addItem(hostapi['name'])
        self.layout.addWidget(self.host_label, 0, 0)
        self.layout.addWidget(self.host, 0, 1)
        self.host.currentTextChanged.connect(self.host_changed)
        # create combobox and add available inputs
        self.inputs_label = QLabel('Input:')
        self.inputs = QComboBox(self.centralWidget)
        self.inputs_label.setBuddy(self.inputs)
        self.inputs.setToolTip('Choose your sound device input channels')
        self.hostapi = sd.query_hostapis(self.host.currentIndex())
        for idx in self.hostapi['devices']:
            if sd.query_devices(idx)['max_input_channels'] > 0:
                self.inputs.addItem(sd.query_devices(idx)['name'])
        self.inputs.currentTextChanged.connect(self.input_changed)
        self.layout.addWidget(self.inputs_label, 1, 0)
        self.layout.addWidget(self.inputs, 1, 1)
        # create combobox and add available outputs
        self.outputs_label = QLabel('Outputs:')
        self.outputs = QComboBox(self.centralWidget)
        self.outputs_label.setBuddy(self.outputs)
        self.outputs.setToolTip('Choose your sound device output channels')
        self.hostapi = sd.query_hostapis(self.host.currentIndex())
        for idx in self.hostapi['devices']:
            if sd.query_devices(idx)['max_output_channels'] > 0:
                self.outputs.addItem(sd.query_devices(idx)['name'])
        self.layout.addWidget(self.outputs_label, 2, 0)
        self.layout.addWidget(self.outputs, 2, 1)
        self.outputs.currentTextChanged.connect(self.output_changed)
        self.setLayout(self.layout)
        self.exec_()
    def get_input_id_by_name(self, channel_name):
        """Return the device index of the first input device whose name
        equals *channel_name*; raise ValueError if none matches."""
        devices_list = sd.query_devices()
        for index, device_msg_dict in enumerate(devices_list):
            if channel_name == device_msg_dict["name"] and \
                    device_msg_dict["max_input_channels"] > 0:
                return index
        else:
            # for/else: runs only when the loop finished without a match.
            raise ValueError("cannot find the input channel")
    def get_output_id_by_name(self, channel_name):
        """Return the device index of the first output device whose name
        equals *channel_name*; raise ValueError if none matches."""
        devices_list = sd.query_devices()
        for index, device_msg_dict in enumerate(devices_list):
            if channel_name == device_msg_dict["name"] and \
                    device_msg_dict["max_output_channels"] > 0:
                return index
        else:
            # for/else: runs only when the loop finished without a match.
            raise ValueError("cannot find the output channel")
    def host_changed(self, host):
        """Slot for host-API changes; currently only logs.

        NOTE(review): selecting a host does not re-filter the input/output
        combo boxes — they are only populated once in __init__.
        """
        # set host comman not found
        print("Sound device(host) is alread changed to xxx")
    def input_changed(self, input_name):
        """Slot: set the global default input device to the chosen name."""
        input_id = self.get_input_id_by_name(input_name)
        sd.default.device[0] = input_id  # index 1 is output, index 0 is input
        print("inputs changed:", input_name)
    def output_changed(self, output_name):
        """Slot: set the global default output device to the chosen name."""
        output_id = self.get_output_id_by_name(output_name)
        sd.default.device[1] = output_id  # index 1 is output, index 0 is input
        print("outputs changed:", output_name)
# @pyqtSlot()
# def recordButton_on_clicked(filesname, device_out, channels_out):
# playsi = Play(filesname,device_out=device_out, channels_out=channels_out)
# playsi.play()
@pyqtSlot()
def PlotButton_on_clicked():
    """Open the real-time waveform/spectrum window and start its timer."""
    live_view = realtime_Plot()
    live_view.show()
    live_view.animation()
@pyqtSlot()
def settingButton_on_click():
    """Show the modal sound-device settings dialog (blocks until closed)."""
    settings()
if __name__ == '__main__':
    # Build the Qt application, show the main window, then hand control to
    # the Qt event loop; exit with its return code.
    qt_app = QApplication(sys.argv)
    main_window = WidgetGallery()
    main_window.show()
    sys.exit(qt_app.exec_())
| [
"matplotlib.pyplot.title",
"PyQt5.QtWidgets.QApplication.palette",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"scipy.io.wavfile.read",
"numpy.arange",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QApplication.setAttribute",
"PyQt5.QtWidgets.QTabWidget",
"os.path.join",
"py... | [((1407, 1483), 'PyQt5.QtWidgets.QApplication.setAttribute', 'QtWidgets.QApplication.setAttribute', (['QtCore.Qt.AA_EnableHighDpiScaling', '(True)'], {}), '(QtCore.Qt.AA_EnableHighDpiScaling, True)\n', (1442, 1483), False, 'from PyQt5 import QtWidgets\n'), ((1504, 1577), 'PyQt5.QtWidgets.QApplication.setAttribute', 'QtWidgets.QApplication.setAttribute', (['QtCore.Qt.AA_UseHighDpiPixmaps', '(True)'], {}), '(QtCore.Qt.AA_UseHighDpiPixmaps, True)\n', (1539, 1577), False, 'from PyQt5 import QtWidgets\n'), ((25004, 25014), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (25012, 25014), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((25111, 25121), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (25119, 25121), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((25207, 25229), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (25219, 25229), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((1781, 1803), 'PyQt5.QtWidgets.QApplication.palette', 'QApplication.palette', ([], {}), '()\n', (1801, 1803), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((1826, 1861), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Gruppe 6 - Audio Devices:"""'], {}), "('Gruppe 6 - Audio Devices:')\n", (1832, 1861), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((1887, 1910), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Settings"""'], {}), "('Settings')\n", (1898, 1910), False, 'from PyQt5.QtWidgets import 
QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((2115, 2128), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (2126, 2128), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((2267, 2280), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (2278, 2280), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((2757, 2788), 'PyQt5.QtWidgets.QApplication.setStyle', 'QApplication.setStyle', (['"""Fusion"""'], {}), "('Fusion')\n", (2778, 2788), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((2845, 2864), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""Record"""'], {}), "('Record')\n", (2854, 2864), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((2978, 2991), 'PyQt5.QtWidgets.QFormLayout', 'QFormLayout', ([], {}), '()\n', (2989, 2991), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((3022, 3035), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['""""""'], {}), "('')\n", (3031, 3035), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, 
QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((3172, 3193), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Record"""'], {}), "('Record')\n", (3183, 3193), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((3516, 3535), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Stop"""'], {}), "('Stop')\n", (3527, 3535), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((3663, 3682), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Plot"""'], {}), "('Plot')\n", (3674, 3682), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((3759, 3772), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (3770, 3772), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((4525, 4537), 'PyQt5.QtCore.QTimer', 'QTimer', (['self'], {}), '(self)\n', (4531, 4537), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((4654, 4687), 'haiopy.Record', 'haiopy.Record', (['"""wav"""'], {'device_in': '(0)'}), "('wav', device_in=0)\n", (4667, 4687), True, 'import haiopy as haiopy\n'), ((5256, 5268), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (5266, 5268), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, 
QFileDialog, QSizePolicy\n'), ((5417, 5426), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (5424, 5426), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((5450, 5469), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Open"""'], {}), "('Open')\n", (5461, 5469), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((5593, 5606), 'PyQt5.QtWidgets.QFormLayout', 'QFormLayout', ([], {}), '()\n', (5604, 5606), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((5638, 5660), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""file path"""'], {}), "('file path')\n", (5647, 5660), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((5801, 5820), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Play"""'], {}), "('Play')\n", (5812, 5820), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6094, 6113), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Stop"""'], {}), "('Stop')\n", (6105, 6113), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6266, 6285), 
'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Plot"""'], {}), "('Plot')\n", (6277, 6285), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6368, 6381), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (6379, 6381), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6733, 6742), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (6740, 6742), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6774, 6793), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Open"""'], {}), "('Open')\n", (6785, 6793), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6942, 6955), 'PyQt5.QtWidgets.QFormLayout', 'QFormLayout', ([], {}), '()\n', (6953, 6955), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((6995, 7017), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""file path"""'], {}), "('file path')\n", (7004, 7017), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((7235, 7256), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Record"""'], {}), "('Record')\n", 
(7246, 7256), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((7609, 7628), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Stop"""'], {}), "('Stop')\n", (7620, 7628), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((7784, 7797), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (7795, 7797), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((8360, 8381), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (8379, 8381), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((8457, 8716), 'PyQt5.QtWidgets.QFileDialog.getOpenFileNames', 'QFileDialog.getOpenFileNames', (['self', '"""QFileDialog. getOpenFileNames()"""', '""""""', '"""All Files (*);; Python Files (*.py)"""'], {'options': 'options'}), "(self,\n 'QFileDialog. 
getOpenFileNames()'\n , '',\n 'All Files (*);; Python Files (*.py)'\n , options=options)\n", (8485, 8716), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((9191, 9212), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (9210, 9212), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((9288, 9547), 'PyQt5.QtWidgets.QFileDialog.getOpenFileNames', 'QFileDialog.getOpenFileNames', (['self', '"""QFileDialog. getOpenFileNames()"""', '""""""', '"""All Files (*);; Python Files (*.py)"""'], {'options': 'options'}), "(self,\n 'QFileDialog. getOpenFileNames()'\n , '',\n 'All Files (*);; Python Files (*.py)'\n , options=options)\n", (9316, 9547), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((14925, 14939), 'PyQt5.QtWidgets.QProgressBar', 'QProgressBar', ([], {}), '()\n', (14937, 14939), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((15946, 16077), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""No files has been chosen or its not a wav file\n \n please chose a wav file"""', 'self'], {}), '(\n """No files has been chosen or its not a wav file\n \n please chose a wav file"""\n , self)\n', (15952, 16077), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, 
QFileDialog, QSizePolicy\n'), ((16485, 16510), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (16508, 16510), True, 'import pyqtgraph as pg\n'), ((16577, 16602), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (16600, 16602), True, 'import pyqtgraph as pg\n'), ((16742, 16775), 'pyqtgraph.AxisItem', 'pg.AxisItem', ([], {'orientation': '"""bottom"""'}), "(orientation='bottom')\n", (16753, 16775), True, 'import pyqtgraph as pg\n'), ((16910, 16941), 'pyqtgraph.AxisItem', 'pg.AxisItem', ([], {'orientation': '"""left"""'}), "(orientation='left')\n", (16921, 16941), True, 'import pyqtgraph as pg\n'), ((17307, 17340), 'pyqtgraph.AxisItem', 'pg.AxisItem', ([], {'orientation': '"""bottom"""'}), "(orientation='bottom')\n", (17318, 17340), True, 'import pyqtgraph as pg\n'), ((17492, 17523), 'pyqtgraph.AxisItem', 'pg.AxisItem', ([], {'orientation': '"""left"""'}), "(orientation='left')\n", (17503, 17523), True, 'import pyqtgraph as pg\n'), ((18231, 18244), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (18242, 18244), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((18592, 18609), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (18607, 18609), False, 'import pyaudio\n'), ((18901, 18932), 'numpy.arange', 'np.arange', (['(0)', '(2 * self.CHUNK)', '(2)'], {}), '(0, 2 * self.CHUNK, 2)\n', (18910, 18932), True, 'import numpy as np\n'), ((18950, 18977), 'numpy.linspace', 'np.linspace', (['(0)', '(20000)', '(1024)'], {}), '(0, 20000, 1024)\n', (18961, 18977), True, 'import numpy as np\n'), ((20832, 20847), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (20845, 20847), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((21399, 21412), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (21410, 21412), 
False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((21475, 21488), 'PyQt5.QtWidgets.QWidget', 'QWidget', (['self'], {}), '(self)\n', (21482, 21488), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((21570, 21590), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Host APis:"""'], {}), "('Host APis:')\n", (21576, 21590), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((21611, 21640), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self.centralWidget'], {}), '(self.centralWidget)\n', (21620, 21640), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((21764, 21783), 'sounddevice.query_hostapis', 'sd.query_hostapis', ([], {}), '()\n', (21781, 21783), True, 'import sounddevice as sd\n'), ((22076, 22092), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Input:"""'], {}), "('Input:')\n", (22082, 22092), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((22115, 22144), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self.centralWidget'], {}), '(self.centralWidget)\n', (22124, 22144), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, 
QFileDialog, QSizePolicy\n'), ((22762, 22780), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Outputs:"""'], {}), "('Outputs:')\n", (22768, 22780), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((22804, 22833), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self.centralWidget'], {}), '(self.centralWidget)\n', (22813, 22833), False, 'from PyQt5.QtWidgets import QApplication, QComboBox, QDialog, QGridLayout, QTabWidget, QProgressBar, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QPushButton, QWidget, QFormLayout, QFileDialog, QSizePolicy\n'), ((23511, 23529), 'sounddevice.query_devices', 'sd.query_devices', ([], {}), '()\n', (23527, 23529), True, 'import sounddevice as sd\n'), ((23908, 23926), 'sounddevice.query_devices', 'sd.query_devices', ([], {}), '()\n', (23924, 23926), True, 'import sounddevice as sd\n'), ((10317, 10455), 'haiopy.PlayRecord', 'haiopy.PlayRecord', ([], {'audio_in': '"""wav"""', 'audio_out': 'fi', 'device_in': 'sd.default.device[0]', 'device_out': 'sd.default.device[1]', 'sampling_rate': '(44100)'}), "(audio_in='wav', audio_out=fi, device_in=sd.default.device\n [0], device_out=sd.default.device[1], sampling_rate=44100)\n", (10334, 10455), True, 'import haiopy as haiopy\n'), ((10808, 10820), 'PyQt5.QtCore.QTimer', 'QTimer', (['self'], {}), '(self)\n', (10814, 10820), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((11828, 11888), 'haiopy.Play', 'haiopy.Play', (['self.filesName'], {'device_out': 'sd.default.device[1]'}), '(self.filesName, device_out=sd.default.device[1])\n', (11839, 11888), True, 'import haiopy as haiopy\n'), ((12063, 12075), 'PyQt5.QtCore.QTimer', 'QTimer', (['self'], {}), '(self)\n', (12069, 12075), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((13521, 13562), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""audio"""'], {}), 
"(os.environ['HOME'], 'audio')\n", (13533, 13562), False, 'import os\n'), ((13590, 13633), 'os.path.join', 'os.path.join', (['dataset_path', 'myAudioFilename'], {}), '(dataset_path, myAudioFilename)\n', (13602, 13633), False, 'import os\n'), ((13677, 13699), 'scipy.io.wavfile.read', 'wavfile.read', (['wavedata'], {}), '(wavedata)\n', (13689, 13699), True, 'import scipy.io.wavfile as wavfile\n'), ((13808, 13846), 'numpy.arange', 'np.arange', (['(0)', 'duration', '(1 / sampleRate)'], {}), '(0, duration, 1 / sampleRate)\n', (13817, 13846), True, 'import numpy as np\n'), ((14354, 14376), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (14364, 14376), True, 'import matplotlib.pyplot as plt\n'), ((14393, 14416), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (14403, 14416), True, 'import matplotlib.pyplot as plt\n'), ((14433, 14459), 'matplotlib.pyplot.title', 'plt.title', (['myAudioFilename'], {}), '(myAudioFilename)\n', (14442, 14459), True, 'import matplotlib.pyplot as plt\n'), ((14476, 14486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14484, 14486), True, 'import matplotlib.pyplot as plt\n'), ((17034, 17046), 'numpy.log10', 'np.log10', (['(20)'], {}), '(20)\n', (17042, 17046), True, 'import numpy as np\n'), ((17056, 17068), 'numpy.log10', 'np.log10', (['(50)'], {}), '(50)\n', (17064, 17068), True, 'import numpy as np\n'), ((17078, 17091), 'numpy.log10', 'np.log10', (['(100)'], {}), '(100)\n', (17086, 17091), True, 'import numpy as np\n'), ((17114, 17127), 'numpy.log10', 'np.log10', (['(250)'], {}), '(250)\n', (17122, 17127), True, 'import numpy as np\n'), ((17138, 17151), 'numpy.log10', 'np.log10', (['(500)'], {}), '(500)\n', (17146, 17151), True, 'import numpy as np\n'), ((17174, 17188), 'numpy.log10', 'np.log10', (['(1000)'], {}), '(1000)\n', (17182, 17188), True, 'import numpy as np\n'), ((17198, 17212), 'numpy.log10', 'np.log10', (['(4000)'], {}), '(4000)\n', (17206, 
17212), True, 'import numpy as np\n'), ((17234, 17248), 'numpy.log10', 'np.log10', (['(8000)'], {}), '(8000)\n', (17242, 17248), True, 'import numpy as np\n'), ((17258, 17273), 'numpy.log10', 'np.log10', (['(20000)'], {}), '(20000)\n', (17266, 17273), True, 'import numpy as np\n'), ((20039, 20072), 'numpy.array', 'np.array', (['self.wf_data'], {'dtype': '"""b"""'}), "(self.wf_data, dtype='b')\n", (20047, 20072), True, 'import numpy as np\n'), ((20458, 20494), 'numpy.array', 'np.array', (['self.wf_data'], {'dtype': '"""int8"""'}), "(self.wf_data, dtype='int8')\n", (20466, 20494), True, 'import numpy as np\n'), ((14087, 14111), 'numpy.array', 'np.array', (['newaudioBuffer'], {}), '(newaudioBuffer)\n', (14095, 14111), True, 'import numpy as np\n'), ((14186, 14216), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'newaudioBuffer'], {}), '(time, newaudioBuffer)\n', (14194, 14216), True, 'import matplotlib.pyplot as plt\n'), ((14310, 14337), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'audioBuffer'], {}), '(time, audioBuffer)\n', (14318, 14337), True, 'import matplotlib.pyplot as plt\n'), ((19092, 19121), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (19119, 19121), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((19810, 19822), 'numpy.log10', 'np.log10', (['(20)'], {}), '(20)\n', (19818, 19822), True, 'import numpy as np\n'), ((19824, 19847), 'numpy.log10', 'np.log10', (['(self.RATE / 2)'], {}), '(self.RATE / 2)\n', (19832, 19847), True, 'import numpy as np\n'), ((22393, 22414), 'sounddevice.query_devices', 'sd.query_devices', (['idx'], {}), '(idx)\n', (22409, 22414), True, 'import sounddevice as sd\n'), ((23086, 23107), 'sounddevice.query_devices', 'sd.query_devices', (['idx'], {}), '(idx)\n', (23102, 23107), True, 'import sounddevice as sd\n'), ((22478, 22499), 'sounddevice.query_devices', 'sd.query_devices', (['idx'], {}), '(idx)\n', (22494, 22499), True, 'import sounddevice as sd\n'), ((23173, 23194), 
'sounddevice.query_devices', 'sd.query_devices', (['idx'], {}), '(idx)\n', (23189, 23194), True, 'import sounddevice as sd\n')] |
from scipy.spatial.distance import cdist
import numpy as np
class DBScan():
    """A simple implementation of DBSCAN clustering.

    Points are grouped into density-connected clusters; points that are not
    density-reachable from any seed are labelled as outliers.  Cluster labels
    start at 1; a label of -1 marks an outlier (noise).
    """
    def __init__(self, max_dist, min_pts):
        # max_dist: maximum distance for two points to count as neighbours.
        # min_pts:  minimum number of reachable points (the point itself is
        #           included) required for a point to seed or extend a cluster.
        self.data = None
        self.max_dist = max_dist
        self.min_pts = min_pts
        # self.labels holds the final label assignment per data point.
        # It is initialised with 0's ("unclaimed"); cluster ids start at 1.
        self.labels = None

    def fit(self, data):
        """Cluster `data` and return the list of labels (-1 = outlier).

        For each point P we check whether it is a valid seed (has at least
        min_pts reachable neighbours) and, if so, grow a cluster from it.
        """
        self.data = data
        self.labels = [0] * len(self.data)
        cluster = 0
        for P in range(0, len(self.data)):
            reachable = self.find_reachable(P)
            # If a point isn't a valid seed it is (provisionally) an outlier.
            # This is the only place a label is set to -1; the point may still
            # be claimed later as a boundary point of some cluster.
            if len(reachable) < self.min_pts:
                self.labels[P] = -1
            elif self.labels[P] == 0:
                cluster += 1
                self.create_cluster(P, cluster, reachable)
        return self.labels

    def predict(self, P):
        """Given a new point P, return the cluster label of the first fitted
        point lying within max_dist of it (None if no fitted point is close
        enough)."""
        # NOTE: fixed from `range(0, len, self.data)`, which raised a
        # TypeError on every call (the builtin `len` was used as the stop).
        for i in range(len(self.data)):
            if cdist(np.reshape(P, (-1, 2)),
                     np.reshape(self.data[i], (-1, 2))) < self.max_dist:
                return self.labels[i]

    def create_cluster(self, P, cluster, reachable):
        """Given a valid seed point, create the cluster with every point that
        belongs according to the distance threshold."""
        self.labels[P] = cluster
        # Walk through the seed's reachable list, claiming neighbours and
        # appending *their* neighbours when they are valid branch points.
        i = 0
        while i < len(reachable):
            next_point = reachable[i]
            # Previously marked as noise: not a valid branch, so just absorb
            # it into the cluster as a boundary point and move on.
            if self.labels[next_point] == -1:
                self.labels[next_point] = cluster
            # Unclaimed point: claim it and, if it is dense enough, grow the
            # frontier with its own neighbours.
            elif self.labels[next_point] == 0:
                self.labels[next_point] = cluster
                next_point_reachable = self.find_reachable(next_point)
                if len(next_point_reachable) > self.min_pts:
                    reachable = reachable + next_point_reachable
            i += 1

    def find_reachable(self, P):
        """Return the indices of all data points within max_dist of point P
        (P itself is always included, since its self-distance is 0)."""
        reachable = []
        for i in range(0, len(self.data)):
            if cdist(np.reshape(self.data[P], (-1, 2)),
                     np.reshape(self.data[i], (-1, 2))) < self.max_dist:
                reachable.append(i)
        return reachable
"numpy.reshape"
] | [((1520, 1542), 'numpy.reshape', 'np.reshape', (['P', '(-1, 2)'], {}), '(P, (-1, 2))\n', (1530, 1542), True, 'import numpy as np\n'), ((1542, 1575), 'numpy.reshape', 'np.reshape', (['self.data[i]', '(-1, 2)'], {}), '(self.data[i], (-1, 2))\n', (1552, 1575), True, 'import numpy as np\n'), ((3100, 3133), 'numpy.reshape', 'np.reshape', (['self.data[P]', '(-1, 2)'], {}), '(self.data[P], (-1, 2))\n', (3110, 3133), True, 'import numpy as np\n'), ((3133, 3166), 'numpy.reshape', 'np.reshape', (['self.data[i]', '(-1, 2)'], {}), '(self.data[i], (-1, 2))\n', (3143, 3166), True, 'import numpy as np\n')] |
from pathlib import Path
import random
from typing import Optional
import numpy as np
import ase
from ase.utils.ff import Morse, Angle, Dihedral, VdW
from ase.calculators.ff import ForceField
from ase.optimize.precon import FF, PreconLBFGS
from ase.optimize.precon.neighbors import get_neighbours
from rdkit import Chem
from rdkit.Chem.AllChem import (
EmbedMultipleConfs, GetBestRMS,
MMFFGetMoleculeForceField, MMFFGetMoleculeProperties,
UFFGetMoleculeForceField,
)
from . import settings
def guess_conformer(mol: Chem.rdchem.Mol,
                    attempts: int = 50) -> Chem.rdchem.Conformer:
    """Embed multiple 3D conformers of *mol* and return the lowest-energy one.

    Args:
        mol: RDKit molecule (hydrogens should already be added).
        attempts: number of conformers to embed; up to 25x as many embedding
            attempts are allowed before giving up.

    Returns:
        The conformer with the lowest force-field energy after minimization.

    Raises:
        ValueError: if no conformer could be embedded, or if force-field
            evaluation failed for every embedded conformer.
    """
    max_attempts = attempts * 25
    EmbedMultipleConfs(mol,
                       numConfs=attempts,
                       maxAttempts=max_attempts,
                       useRandomCoords=True,
                       randomSeed=random.randint(1, 10000000))
    num_confs = len(mol.GetConformers())
    if not num_confs:
        raise ValueError(f"No conformer with {max_attempts} attemps")
    conf_energies = []
    for i in range(0, num_confs):
        try:
            props = MMFFGetMoleculeProperties(mol)
            potential = MMFFGetMoleculeForceField(mol, props, confId=i)
            if potential is None:
                # MMFF parameters unavailable for this molecule: fall back to UFF.
                potential = UFFGetMoleculeForceField(mol, confId=i)
            potential.Minimize()
            ff_energy = potential.CalcEnergy()
            conf_energies.append((i, ff_energy))
        except Exception:
            # Skip conformers whose force field cannot be built or minimized.
            # (Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
    if not conf_energies:
        # Previously this fell through to an opaque IndexError on sorted([])[0].
        raise ValueError("Force-field evaluation failed for every conformer")
    min_id = min(conf_energies, key=lambda x: x[1])[0]
    conf = mol.GetConformer(id=min_id)
    return conf
def make_atoms(mol: Chem.rdchem.Mol) -> ase.Atoms:
    """Build an ase.Atoms object from the best guessed conformer of *mol*."""
    conformer = guess_conformer(mol)
    rd_atoms = mol.GetAtoms()
    result = ase.Atoms()
    for idx in range(0, conformer.GetNumAtoms()):
        symbol = rd_atoms[idx].GetSymbol()
        pos = conformer.GetAtomPosition(idx)
        result.append(ase.Atom(symbol, [pos.x, pos.y, pos.z]))
    return result
def get_atoms(smiles: str) -> ase.Atoms:
    """Parse a SMILES string, add explicit hydrogens, and return 3D Atoms."""
    molecule = Chem.AddHs(Chem.MolFromSmiles(smiles))
    return make_atoms(molecule)
def calc_ff(atoms: ase.Atoms) -> tuple[ForceField, FF]:
    """Build an approximate force field (and matching preconditioner) for *atoms*.

    Bonded neighbours (within 1.5 A) get Morse bond terms; every bonded pair
    sharing a central atom gets an angle term; triples around a centre get a
    dihedral term.  All remaining non-bonded pairs get a van der Waals term.
    The numeric parameters are fixed, generic values — presumably tuned for
    organic molecules; TODO confirm their origin.

    Returns:
        (ForceField calculator, FF preconditioner) built from the same
        bonded terms (the preconditioner omits the vdW terms).
    """
    neighbor_list = [[] for _ in range(len(atoms))]
    # vdw_list[i, j] stays True only for pairs that get no bonded term.
    vdw_list = np.ones((len(atoms), len(atoms)), dtype=bool)
    morses = []; angles = []; dihedrals = []; vdws = []
    # Pairs within r_cut=1.5 A are treated as covalently bonded.
    i_list, j_list, d_list, fixed_atoms = get_neighbours(atoms=atoms, r_cut=1.5)
    for i, j in zip(i_list, j_list):
        neighbor_list[i].append(j)
    for i in range(len(neighbor_list)):
        neighbor_list[i].sort()
    for i in range(len(atoms)):
        for jj in range(len(neighbor_list[i])):
            j = neighbor_list[i][jj]
            # j > i avoids adding each bond twice (neighbour lists are symmetric).
            if j > i:
                morses.append(Morse(atomi=i, atomj=j, D=6.1322, alpha=1.8502, r0=1.4322))
                vdw_list[i, j] = vdw_list[j, i] = False
            # Every pair (j, k) of neighbours of i forms an angle j-i-k;
            # kk starts at jj+1 so each unordered pair is used once.
            for kk in range(jj+1, len(neighbor_list[i])):
                k = neighbor_list[i][kk]
                angles.append(Angle(atomi=j, atomj=i, atomk=k, k=10.0, a0=np.deg2rad(120.0), cos=True))
                vdw_list[j, k] = vdw_list[k, j] = False
                # Triples (j, k, l) of neighbours of i yield improper-style
                # dihedral terms centred on i.
                for ll in range(kk+1, len(neighbor_list[i])):
                    l = neighbor_list[i][ll]
                    dihedrals.append(Dihedral(atomi=j, atomj=i, atomk=k, atoml=l, k=0.346))
    # Any pair not excluded above interacts only through van der Waals.
    for i in range(len(atoms)):
        for j in range(i+1, len(atoms)):
            if vdw_list[i, j]:
                vdws.append(VdW(atomi=i, atomj=j, epsilonij=0.0115, rminij=3.4681))
    return (
        ForceField(morses=morses, angles=angles, dihedrals=dihedrals, vdws=vdws),
        FF(morses=morses, angles=angles, dihedrals=dihedrals),
    )
def pre_optimize(atoms: ase.Atoms, fmax: float = 1e-4) -> ase.Atoms:
    """Relax *atoms* in place with the approximate force field from calc_ff.

    Uses a preconditioned L-BFGS optimizer until the maximum force falls
    below *fmax*.  The optimizer log is written to SCRATCH_PATH/temp.pre.

    Note: the return annotation was corrected from ``None`` — the function
    does return the (in-place optimized) Atoms object.

    Args:
        atoms: structure to relax; its calculator is replaced.
        fmax: force convergence threshold (eV/A).

    Returns:
        The same Atoms object, optimized in place.
    """
    logfile = Path(settings.SCRATCH_PATH).joinpath("temp.pre")
    calc, precon = calc_ff(atoms)
    atoms.calc = calc
    opt = PreconLBFGS(atoms, precon=precon, use_armijo=True, logfile=logfile)
    opt.run(fmax=fmax)
    return atoms
| [
"random.randint",
"numpy.deg2rad",
"ase.optimize.precon.neighbors.get_neighbours",
"ase.optimize.precon.PreconLBFGS",
"rdkit.Chem.AllChem.MMFFGetMoleculeForceField",
"ase.Atom",
"ase.utils.ff.Dihedral",
"ase.calculators.ff.ForceField",
"pathlib.Path",
"ase.utils.ff.Morse",
"ase.optimize.precon.F... | [((1718, 1729), 'ase.Atoms', 'ase.Atoms', ([], {}), '()\n', (1727, 1729), False, 'import ase\n'), ((1970, 1996), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (1988, 1996), False, 'from rdkit import Chem\n'), ((2007, 2022), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (2017, 2022), False, 'from rdkit import Chem\n'), ((2338, 2376), 'ase.optimize.precon.neighbors.get_neighbours', 'get_neighbours', ([], {'atoms': 'atoms', 'r_cut': '(1.5)'}), '(atoms=atoms, r_cut=1.5)\n', (2352, 2376), False, 'from ase.optimize.precon.neighbors import get_neighbours\n'), ((3836, 3903), 'ase.optimize.precon.PreconLBFGS', 'PreconLBFGS', (['atoms'], {'precon': 'precon', 'use_armijo': '(True)', 'logfile': 'logfile'}), '(atoms, precon=precon, use_armijo=True, logfile=logfile)\n', (3847, 3903), False, 'from ase.optimize.precon import FF, PreconLBFGS\n'), ((3472, 3544), 'ase.calculators.ff.ForceField', 'ForceField', ([], {'morses': 'morses', 'angles': 'angles', 'dihedrals': 'dihedrals', 'vdws': 'vdws'}), '(morses=morses, angles=angles, dihedrals=dihedrals, vdws=vdws)\n', (3482, 3544), False, 'from ase.calculators.ff import ForceField\n'), ((3554, 3607), 'ase.optimize.precon.FF', 'FF', ([], {'morses': 'morses', 'angles': 'angles', 'dihedrals': 'dihedrals'}), '(morses=morses, angles=angles, dihedrals=dihedrals)\n', (3556, 3607), False, 'from ase.optimize.precon import FF, PreconLBFGS\n'), ((847, 874), 'random.randint', 'random.randint', (['(1)', '(10000000)'], {}), '(1, 10000000)\n', (861, 874), False, 'import random\n'), ((1101, 1131), 'rdkit.Chem.AllChem.MMFFGetMoleculeProperties', 'MMFFGetMoleculeProperties', (['mol'], {}), '(mol)\n', (1126, 1131), False, 'from rdkit.Chem.AllChem import EmbedMultipleConfs, GetBestRMS, MMFFGetMoleculeForceField, MMFFGetMoleculeProperties, UFFGetMoleculeForceField\n'), ((1156, 1203), 'rdkit.Chem.AllChem.MMFFGetMoleculeForceField', 'MMFFGetMoleculeForceField', (['mol', 'props'], 
{'confId': 'i'}), '(mol, props, confId=i)\n', (1181, 1203), False, 'from rdkit.Chem.AllChem import EmbedMultipleConfs, GetBestRMS, MMFFGetMoleculeForceField, MMFFGetMoleculeProperties, UFFGetMoleculeForceField\n'), ((1870, 1898), 'ase.Atom', 'ase.Atom', (['a', '[p.x, p.y, p.z]'], {}), '(a, [p.x, p.y, p.z])\n', (1878, 1898), False, 'import ase\n'), ((3695, 3722), 'pathlib.Path', 'Path', (['settings.SCRATCH_PATH'], {}), '(settings.SCRATCH_PATH)\n', (3699, 3722), False, 'from pathlib import Path\n'), ((1266, 1305), 'rdkit.Chem.AllChem.UFFGetMoleculeForceField', 'UFFGetMoleculeForceField', (['mol'], {'confId': 'i'}), '(mol, confId=i)\n', (1290, 1305), False, 'from rdkit.Chem.AllChem import EmbedMultipleConfs, GetBestRMS, MMFFGetMoleculeForceField, MMFFGetMoleculeProperties, UFFGetMoleculeForceField\n'), ((2691, 2749), 'ase.utils.ff.Morse', 'Morse', ([], {'atomi': 'i', 'atomj': 'j', 'D': '(6.1322)', 'alpha': '(1.8502)', 'r0': '(1.4322)'}), '(atomi=i, atomj=j, D=6.1322, alpha=1.8502, r0=1.4322)\n', (2696, 2749), False, 'from ase.utils.ff import Morse, Angle, Dihedral, VdW\n'), ((3394, 3448), 'ase.utils.ff.VdW', 'VdW', ([], {'atomi': 'i', 'atomj': 'j', 'epsilonij': '(0.0115)', 'rminij': '(3.4681)'}), '(atomi=i, atomj=j, epsilonij=0.0115, rminij=3.4681)\n', (3397, 3448), False, 'from ase.utils.ff import Morse, Angle, Dihedral, VdW\n'), ((3206, 3259), 'ase.utils.ff.Dihedral', 'Dihedral', ([], {'atomi': 'j', 'atomj': 'i', 'atomk': 'k', 'atoml': 'l', 'k': '(0.346)'}), '(atomi=j, atomj=i, atomk=k, atoml=l, k=0.346)\n', (3214, 3259), False, 'from ase.utils.ff import Morse, Angle, Dihedral, VdW\n'), ((2976, 2993), 'numpy.deg2rad', 'np.deg2rad', (['(120.0)'], {}), '(120.0)\n', (2986, 2993), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
In this script we find outlier traces based on their distance from outhers.
Every trace isbecomivng a vector which contain information both from the
structure and times. Then based on the k and r we found the outlyiers.
"""
import datetime
from pm4py.algo.filtering.log.attributes import attributes_filter as log_attributes_filter
from sklearn.preprocessing import StandardScaler
import numpy as np
from pm4py.objects.log.importer.xes import factory as xes_factory
from mtree import MTree
import math
import os
def dataPreprocess(log):
"""
In this function data from log, will be transformed to a vector
"""
activities_all = log_attributes_filter.get_attribute_values(log, "concept:name")
activities=list(activities_all.keys())
dataVectors=[]
times=[[] for i in range(len(activities))]
for trace in log:
activitiesCounter=[0 for i in range(len(activities))]
timesSpend=[datetime.timedelta(0) for i in range(len(activities))]
previousTime=trace.attributes["REG_DATE"]
for index,event in enumerate(trace):
indexActivity=activities.index(event["concept:name"])
activitiesCounter[indexActivity]+=1
timesSpend[indexActivity]+=event["time:timestamp"]-previousTime
times[indexActivity].append(event["time:timestamp"]-previousTime)
previousTime=event["time:timestamp"]
timesSpend=[(timesSpend[i]/activitiesCounter[i]).total_seconds() if activitiesCounter[i]!=0 else 0 for i in range(len(activities))] #contains the mo of all the activities
dataVectors.append(activitiesCounter+timesSpend)
return dataVectors,times,activities
def transform(dataVectors):
"""
Data will be standarized so every attribute can contribute the same to the distance
"""
transposeVector=[[data[i] for data in dataVectors] for i in range(len(dataVectors[0]))]
standarizedData=[]
for field in transposeVector:
y=np.array(field).reshape(-1,1)
sc=StandardScaler()
sc.fit(y)
standarizedData.append(sc.transform(y))
transformedData=[[float(i) for i in k] for k in standarizedData]
return [[data[i] for data in transformedData] for i in range(len(transformedData[0]))]
def distanceMtree(v1,v2):
"""
This is the function that calculated the distance between 2 elements
in the M-Tree
"""
v1List=[float(i) for i in v1[1:-1].split(",")]
v2List=[float(i) for i in v2[1:-1].split(",")]
rmse=0
for index in range(len(v1List)):
rmse+=pow(v1List[index]-v2List[index],2)
return round(math.sqrt(rmse/len(v2List)),4)
def calculateQueries(mtree:MTree, dataVectors:list,K,R):
"""
If there is no previous data of calculated queries, or the value of
k and r are not combatable, this method will create the queries
in the M-Tree based on the given values
"""
queries=[]
for index,dataVector in enumerate(dataVectors):
print(index)
x=list(mtree.get_nearest(str(dataVector),range=R,limit=K))
m=[i[1] for i in x]
queries.append(m)
return queries
def writeTopKNeighborsToFile(preparedQueries,k,r,fileName):
"""
This method writes the prepared Queries in a file, so next time it will
not be needed to calculate them again
"""
newName=fileName+"-"+str(k)+"-"+str(r)+".txt"
with open(newName,"w") as f:
f.write(str(k)+","+str(r)+"\n")
for neighbors in preparedQueries:
for neighbor in neighbors:
f.write(str(neighbor)+" ")
f.write("\n")
def readFromFile(fileName):
"""
read the prepared Queries from the file
"""
preparedQueries=[]
with open(fileName,"r") as f:
k,r=map(int,f[0].spli(",")[:-1])
for index,line in enumerate(f[1:]):
print(index)
preparedQueries.append([float(i) for i in line.split(" ")[:-1]])
return k,r,preparedQueries
def outliersKNN(queries:list,R,K):
"""
Calculate the outliers based on the K and R
"""
outliers=[]
for index,i in enumerate(queries):
try:
if i[K+1]>R:
outliers.append(index)
except IndexError:
outliers.append(index)
return outliers
def outliers(logName,k,r,mtree,dataVectors):
nameFile=logName.split(".")[0]
fileFound=None
for filename in os.listdir('.'):
if filename.startswith(nameFile) and len(filename.split("."))==2 and filename.split(".")[1]=="txt":
name=filename.split(".")[0]
K,R=map(int,name.split("-")[1:])
if k<=K and r<=R:
fileFound=filename
break
preparedQueries=[]
if fileFound==None:
preparedQueries=calculateQueries(mtree,dataVectors,k,r)
writeTopKNeighborsToFile(preparedQueries,k,r)
else:
preparedQueries=readFromFile(fileFound)
return outliersKNN(preparedQueries,r,k)
def createMTree(dataVectorsStandarized):
"""
Add 1 by 1 all the vectors in the M-Tree
"""
myTree=MTree(distance_function=distanceMtree,min_node_capacity=50)
for index,vector in enumerate(dataVectorsStandarized):
print(index)
try:
myTree.add(str(vector))
except:
pass
return myTree
def getNeirestNeighbors(mtree:MTree, dataVectors:list,K):
"""
Get the distance for the closest neighbors for every trace
"""
queries=[]
for index,dataVector in enumerate(dataVectors):
print(index)
x=list(mtree.get_nearest(str(dataVector),limit=K))
m=[i[1] for i in x]
queries.append(m)
return queries
def main(logFile,k,r= None):
print("Loading data..")
log=xes_factory.apply(logFile)
print("Preprocess Data..")
dataVectors,statsTimes,activities=dataPreprocess(log)
dataVectorsStandarized=transform(dataVectors)
print("Creating Mtree...")
mtree=createMTree(dataVectorsStandarized)
if r==None:
print("Getting neirest neighbors")
return getNeirestNeighbors(mtree,dataVectorsStandarized,k)
else:
print("Find outliers based on given K and R")
return outliers(logFile,k,r,mtree,dataVectorsStandarized)
| [
"sklearn.preprocessing.StandardScaler",
"mtree.MTree",
"datetime.timedelta",
"numpy.array",
"pm4py.algo.filtering.log.attributes.attributes_filter.get_attribute_values",
"os.listdir",
"pm4py.objects.log.importer.xes.factory.apply"
] | [((699, 762), 'pm4py.algo.filtering.log.attributes.attributes_filter.get_attribute_values', 'log_attributes_filter.get_attribute_values', (['log', '"""concept:name"""'], {}), "(log, 'concept:name')\n", (741, 762), True, 'from pm4py.algo.filtering.log.attributes import attributes_filter as log_attributes_filter\n'), ((4479, 4494), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (4489, 4494), False, 'import os\n'), ((5170, 5230), 'mtree.MTree', 'MTree', ([], {'distance_function': 'distanceMtree', 'min_node_capacity': '(50)'}), '(distance_function=distanceMtree, min_node_capacity=50)\n', (5175, 5230), False, 'from mtree import MTree\n'), ((5839, 5865), 'pm4py.objects.log.importer.xes.factory.apply', 'xes_factory.apply', (['logFile'], {}), '(logFile)\n', (5856, 5865), True, 'from pm4py.objects.log.importer.xes import factory as xes_factory\n'), ((2058, 2074), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2072, 2074), False, 'from sklearn.preprocessing import StandardScaler\n'), ((976, 997), 'datetime.timedelta', 'datetime.timedelta', (['(0)'], {}), '(0)\n', (994, 997), False, 'import datetime\n'), ((2017, 2032), 'numpy.array', 'np.array', (['field'], {}), '(field)\n', (2025, 2032), True, 'import numpy as np\n')] |
# 3 - simple plot
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-1, 1, 100)
y = 2*x + 1
plt.plot(x, y)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.linspace"
] | [((182, 205), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (193, 205), True, 'import numpy as np\n'), ((218, 232), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (226, 232), True, 'import matplotlib.pyplot as plt\n'), ((233, 243), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (241, 243), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Two-dimensional simulation code for buoyancy driven convection below an
evaporating salt lake. Allows for input of different simulation parameters
(see main.py -h for all available options) as well as different boundary
conditions.
This source code is subject to the terms of the MIT license. If a copy of the
MIT license was not distributed with this file, you can obtain one at
https://opensource.org/licenses/MIT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Geophysical pattern formation in salt playa'
__credits__ = ['<NAME>', '<NAME>']
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Dev'
import os
from os.path import join
from os import getcwd
import sys
import argparse
from time import time
import numpy as np
from derivative import *
from printfunctions import *
from initialconditions import *
# Limit number of OMP threads used by numpy/scipy via OpenBLAS
os.environ['OMP_NUM_THREADS'] = '{:d}'.format(1)
# parameter-I/O
parser = argparse.ArgumentParser(description='Simulation of two-dimensional '\
+ 'porous media flow' +\
' \nCopyright (C) 2017 <NAME> & <NAME>')
parser.add_argument('-dest', type=str, help='Complete path to the folder '\
+ 'results will be saved to if different than script location',
default=getcwd())
parser.add_argument('-Ra','--rayleigh', type=float, help='Rayleigh number of'+\
' the system', default=100)
parser.add_argument('-Ra2','--rayleigh2', type=int, help='Second Rayleigh number of'+\
' the system', default=100)
parser.add_argument('-H','--height', type=int, help='Height of the '\
+ 'system in natural units',\
default=10)
parser.add_argument('-L','--length', type=int, help='Length of the '\
+ 'system in natural units',\
default=10)
parser.add_argument('-res','--resolution',type=int,\
help='number of grid cells per unit of length.',\
default=6)
parser.add_argument('-T','--maximum_simulation_time',type=float, \
help='duration of simulation in natural units',\
default=25.0)
parser.add_argument('-savetime','--saving_time_interval',type=float,\
help='Interval in which simulation output is dumped',\
default=0.1)
parser.add_argument('-clf', '--adaptive_time_constant', type=float,\
help='CLF-constant for adaptive timestepping',\
default = 0.05)
parser.add_argument('-plt','--plot_field',action="store_true",\
help='Plots fields for debugging / movie creation',default=False)
parser.add_argument('-saveall','--save_all_fields',action="store_true",\
help='Saves velocity fields in addition to salinity',default=False)
parser.add_argument('-amplitude','--wave_amplitude',type=float, \
help='Amplitude of sinusoidal variations in evaporation' + \
' rate at the surface boundary condition',default=0.0)
parser.add_argument('-waves','--wave_number',type=int,\
help='Number of sinusoidal variations in evaporation' + \
' rate at the surface boundary condition',default=1)
parser.add_argument('-init','--initial_condition',type=str,\
help='Type of initial conditions. Can be eather "std" for' + \
' the steady state condition or "dyn" for an exponential decay' +\
' with a length scale from fits to the salinity of the convecting system',\
default='std')
parser.add_argument('-S','--seed',type=int,\
help='Seed for the random number generator. Per default, '+\
'a random seed will be generated based on system time',default=-1)
args = parser.parse_args()
# System control parameter
RA = args.rayleigh # Rayleigh number
RA2 = args.rayleigh2 # second Rayleigh number in case we want a split system
if RA / int(RA) == 1:
RA = int(RA)
# seed for random number generator. Will be randomly generated based
# on system time per default. Use custom seed to re-run simulations
# and investigate simulation crashes
seed = args.seed
if seed == -1:
seed = int(time())
# Space constants
HEIGHT = args.height # height in natural units
LENGTH = args.length # length in natural units
#A = args.aspect_ratio # aspect ratio of length to HEIGHT = L/H
#LENGTH = HEIGHT*A
A = float(LENGTH/HEIGHT)
res = args.resolution # number of grid cells / unit of length
# Time constants
MAXTIME = args.maximum_simulation_time # maximum simulation time in natural units
SAVETIME = args.saving_time_interval
adaptive_dt_constant = args.adaptive_time_constant
SAVECOUNTER = 0
dt = 0.0001 # initial value for dt, will be adapted later
global_time = 0.0 # initial value for global time
# Upper boundary parameters:
# amplitude = amplitude of sinusoidal evaporation rate E(X)
# waves = number of waves of E(X) in the box
# phi = initial phase shift of these waves and the convection cell
amplitude = args.wave_amplitude
waves = args.wave_number
initial_condition = args.initial_condition
parameters = {'Ra': RA, 'Ra2':RA2, 'A': A , \
'amplitude': amplitude, 'waves': waves, 'phi': 0.0, \
'max_T':MAXTIME, 'clf':adaptive_dt_constant, 'res':res,\
'HEIGHT':HEIGHT, 'LENGTH':LENGTH, 'initial conditiions':initial_condition}
# I/O handling
dest = args.dest # location of results folder
run = 1
run_name = 'Ra{}_{}x{}_res{}_T{}_clf{}_amp{}_waves{}_run{}'.format(RA, HEIGHT, \
int(LENGTH), res, MAXTIME, adaptive_dt_constant,amplitude, waves, run)
SAVEPATH = join(dest, run_name)
plot_field = args.plot_field
save_all_fields = args.save_all_fields
# size of box (natural units)
length = np.array([HEIGHT*parameters['A'],HEIGHT]) #2d
# number of grid points
SIZE_Y = HEIGHT * res
size = np.array([int(SIZE_Y*parameters['A']), SIZE_Y]) #2d
size_b = np.array([size[0], size[1]-2]) #size without top/bottom boundaries
# grid spacing
dx = np.divide(length, np.array([size[0], size[1]-1])) #2d
# Create Rayleigh matrix for height dependent Rayleigh number
Ra = Rayleigh_Matrix(size, length, parameters)
# create savepath which will serve as root directory for
# the simulation results. Results for the three fields will
# be saved to three subfolders 'C', 'Ux' and 'Uz'
while os.path.exists(SAVEPATH):
run += 1
run_name = 'Ra{}_{}x{}_res{}_T{}_clf{}_amp{}_waves{}_run{}'.format(RA, HEIGHT, \
int(LENGTH), res, MAXTIME, adaptive_dt_constant,amplitude, waves, run)
SAVEPATH = join(dest, run_name)
# create directories for data storage
print('\n\nwriting to {}'.format(SAVEPATH))
fields = ['C','Ux','Uz']
os.makedirs(SAVEPATH)
for field in fields:
os.makedirs(join(SAVEPATH,field))
# values of boundaries for concentration (C from 0 to 1)
boundaries_C = [1., 0.]
# 6th order coefficients
coeff_dx = ([1., 1./3], [14./9., 1./9.] , 1, 'periodic')
# 6th order coefficients
coeff_dxx = ([1. , 2./11], [12./11, 3./11] , 2, 'periodic')
matrix_dy, dirichlet_vector_dy = CreateNP_CFDMatrix(axis = 1, \
size = size, dx = dx, grade = 1, order = 6)
matrix_dyy, dirichlet_vector_dyy = CreateNP_CFDMatrix(axis = 1, \
size = size, dx = dx, grade = 2, order = 6)
matrix_dx = CreateCFDMatrix(input = coeff_dx, \
size = size, dx = dx, axis = 0)
matrix_dxx = CreateCFDMatrix(input = coeff_dxx, \
size = size, dx = dx, axis = 0)
tensor_fft = Create_FFTMatrix(size = size, dx = dx)
derivatives = {'dx' : matrix_dx, 'dxx': matrix_dxx, \
'dy': matrix_dy, 'dyy': matrix_dyy}
dirichlet_vectors = {'dy': dirichlet_vector_dy, 'dyy': dirichlet_vector_dyy}
### helper functions
# derivative in x- and y- direction
def Derivative(F, direction):
if direction in ['dx', 'dxx']:
return np.matmul(derivatives[direction], F)
elif direction in ['dy', 'dyy']:
return np.transpose(np.matmul(derivatives[direction], np.transpose(F)))
# derivative in y-direction for the concentration applying boundaries_C
def Dirichlet_Derivative_C(F, direction):
b = np.zeros((size[1]-2,))
for i in range(2):
b += dirichlet_vectors[direction][i]*boundaries_C[i]
return np.transpose(np.matmul(derivatives[direction], np.transpose(F))) + b
# define the initial conditions
def InitialConditions(size, par, dx):
if initial_condition == 'std':
# load Steady-State
C = Load_SteadyStateC(size, dx, length)
elif initial_condition == 'dyn':
C = Load_DynamicDecayC(size, dx, length, parameters['Ra'])
elif initial_condition not in ('std', 'dyn') and parameters['Ra'] == 100:
decay_length_scale = float(initial_condition)
C = Load_SpecificDecayC(size, dx, length, decay_length_scale)
else:
print('initial condition unknown, terminating...')
sys.exit()
# load from file
#C = Load_InitialC('onecell200.npy', size= size, frequency = parameters['waves'])
# add random noise
C = AddRandomNoise(C, size, seed, factor = 0.05)
# initialize velocity fields in x and z direction as zero
Psi = np.zeros(size_b, dtype=np.float32)
Omega = np.zeros(size_b, dtype=np.float32)
Psik = np.zeros(size_b, dtype = np.complex)
U = np.array([Derivative(Psi, 'dy') ,-1 - Derivative(Psi, 'dx')])
return (U, C, Psi, Omega, Psik)
# solve advection-diffusion equation in real space
def RungeKutta(U, C, dx, dt):
def f(U,C):
C_dx = Derivative(C, 'dx')
C_dy = Dirichlet_Derivative_C(C, 'dy')
C_dxx = Derivative(C, 'dxx')
C_dyy = Dirichlet_Derivative_C(C, 'dyy')
return C_dxx + C_dyy - (U[0]*C_dx + U[1]*C_dy) # diffusion - advection
# classical Runge-Kutta method 4th order
k1 = f(U,C[:,1:-1])
k2 = f(U,C[:,1:-1] + (dt / 2.0) * k1)
k3 = f(U,C[:,1:-1] + (dt / 2.0) * k2)
k4 = f(U,C[:,1:-1] + dt * k3)
C[:,1:-1] += dt / 6.0 * (k1 + 2* k2 + 2 * k3 + k4)
return C
# primary function for the time stepping
def IntegrationStep(U, C, Psi, Psik, Omega, par, dx, global_time, dt):
# Compute Omega from Ra and concentration Matrix C in real space
Omega = +Ra[:,1:-1] * Derivative(C[:,1:-1], 'dx') - Omega0
# compute Omega in Fourier space
Omegak = np.fft.fftshift(np.fft.fft(-Omega, axis = 0), axes = 0)
# compute Psi in Fourier space
for kx in range(size[0]):
Psik[kx,:] = np.matmul(tensor_fft[:,:,kx],Omegak[kx,:])
# compute Psi in real space
Psi = np.real(np.fft.ifft(np.fft.ifftshift(Psik, axes = 0), axis = 0))
# compute velocity in real space
U = np.array([Derivative(Psi, 'dy') ,- Derivative(Psi, 'dx')]) + U0
# solve advection-diffusion equation in real space
C = RungeKutta(U, C, dx, dt)
global_time += dt
return (U, C, Psi, Omega, global_time)
# define initial conditions
print('random seed: {}'.format(seed))
parameters.update({'seed':seed})
(U, C, Psi, Omega, Psik) = InitialConditions(size, parameters, dx)
U0, Omega0 = SinusoidalEvaporation(size_b, length, parameters)
# print a file recording simulation parameters and random seed
# into the simulation result root directory
PrintParams(parameters, SAVEPATH, run_name)
adaptive_time_counter = 0
while global_time < MAXTIME:
(U, C, Psi, Omega, global_time) = IntegrationStep(U, C,\
Psi, Psik, Omega, parameters, dx, global_time, dt)
#adapt the time step
if (adaptive_time_counter % 20 ==0) or (adaptive_time_counter < 10):
dt = dx[1] / np.max(np.abs(U[1])) * adaptive_dt_constant
adaptive_time_counter += 1
# save system state every SAVETIME:
# all three fields C, Ux, Uz will be saved in binary format
if global_time > SAVECOUNTER*SAVETIME:
print('current time: {}, dt = {}'\
.format(round(global_time,4), dt))
PrintField(C, global_time, SAVETIME, 'C', savepath=join(SAVEPATH,'C'))
if save_all_fields:
PrintField(U[1,0:,0:], global_time, SAVETIME, 'Uz', savepath=join(SAVEPATH,'Uz'))
PrintField(U[0,0:,0:], global_time, SAVETIME, 'Ux', savepath=join(SAVEPATH,'Ux'))
# plot field for debugging reasons (publication quality plots
# are created separately from the field data)
if plot_field:
PlotField(C, global_time, SAVETIME, 'C', savepath=join(SAVEPATH,'C'))
PlotField(U[1,0:,0:], global_time, SAVETIME, 'Uz', savepath=join(SAVEPATH,'Uz'))
PlotField(U[0,0:,0:], global_time, SAVETIME, 'Ux', savepath=join(SAVEPATH,'Ux'))
SAVECOUNTER += 1
| [
"numpy.fft.ifftshift",
"numpy.abs",
"os.makedirs",
"argparse.ArgumentParser",
"os.getcwd",
"numpy.fft.fft",
"os.path.exists",
"numpy.zeros",
"numpy.transpose",
"time.time",
"numpy.array",
"numpy.matmul",
"os.path.join",
"sys.exit"
] | [((2103, 2243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '(\'Simulation of two-dimensional \' + \'porous media flow\' +\n """ \nCopyright (C) 2017 <NAME> & <NAME>""")'}), '(description=\'Simulation of two-dimensional \' +\n \'porous media flow\' + """ \nCopyright (C) 2017 <NAME> & <NAME>""")\n', (2126, 2243), False, 'import argparse\n'), ((6709, 6729), 'os.path.join', 'join', (['dest', 'run_name'], {}), '(dest, run_name)\n', (6713, 6729), False, 'from os.path import join\n'), ((6839, 6883), 'numpy.array', 'np.array', (["[HEIGHT * parameters['A'], HEIGHT]"], {}), "([HEIGHT * parameters['A'], HEIGHT])\n", (6847, 6883), True, 'import numpy as np\n'), ((7002, 7034), 'numpy.array', 'np.array', (['[size[0], size[1] - 2]'], {}), '([size[0], size[1] - 2])\n', (7010, 7034), True, 'import numpy as np\n'), ((7428, 7452), 'os.path.exists', 'os.path.exists', (['SAVEPATH'], {}), '(SAVEPATH)\n', (7442, 7452), False, 'import os\n'), ((7759, 7780), 'os.makedirs', 'os.makedirs', (['SAVEPATH'], {}), '(SAVEPATH)\n', (7770, 7780), False, 'import os\n'), ((7108, 7140), 'numpy.array', 'np.array', (['[size[0], size[1] - 1]'], {}), '([size[0], size[1] - 1])\n', (7116, 7140), True, 'import numpy as np\n'), ((7630, 7650), 'os.path.join', 'join', (['dest', 'run_name'], {}), '(dest, run_name)\n', (7634, 7650), False, 'from os.path import join\n'), ((9117, 9141), 'numpy.zeros', 'np.zeros', (['(size[1] - 2,)'], {}), '((size[1] - 2,))\n', (9125, 9141), True, 'import numpy as np\n'), ((10056, 10090), 'numpy.zeros', 'np.zeros', (['size_b'], {'dtype': 'np.float32'}), '(size_b, dtype=np.float32)\n', (10064, 10090), True, 'import numpy as np\n'), ((10100, 10134), 'numpy.zeros', 'np.zeros', (['size_b'], {'dtype': 'np.float32'}), '(size_b, dtype=np.float32)\n', (10108, 10134), True, 'import numpy as np\n'), ((10143, 10177), 'numpy.zeros', 'np.zeros', (['size_b'], {'dtype': 'np.complex'}), '(size_b, dtype=np.complex)\n', (10151, 10177), True, 'import numpy as np\n'), 
((2428, 2436), 'os.getcwd', 'getcwd', ([], {}), '()\n', (2434, 2436), False, 'from os import getcwd\n'), ((5279, 5285), 'time.time', 'time', ([], {}), '()\n', (5283, 5285), False, 'from time import time\n'), ((7815, 7836), 'os.path.join', 'join', (['SAVEPATH', 'field'], {}), '(SAVEPATH, field)\n', (7819, 7836), False, 'from os.path import join\n'), ((8851, 8887), 'numpy.matmul', 'np.matmul', (['derivatives[direction]', 'F'], {}), '(derivatives[direction], F)\n', (8860, 8887), True, 'import numpy as np\n'), ((11129, 11155), 'numpy.fft.fft', 'np.fft.fft', (['(-Omega)'], {'axis': '(0)'}), '(-Omega, axis=0)\n', (11139, 11155), True, 'import numpy as np\n'), ((11244, 11290), 'numpy.matmul', 'np.matmul', (['tensor_fft[:, :, kx]', 'Omegak[kx, :]'], {}), '(tensor_fft[:, :, kx], Omegak[kx, :])\n', (11253, 11290), True, 'import numpy as np\n'), ((11344, 11374), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['Psik'], {'axes': '(0)'}), '(Psik, axes=0)\n', (11360, 11374), True, 'import numpy as np\n'), ((9270, 9285), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (9282, 9285), True, 'import numpy as np\n'), ((9804, 9814), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9812, 9814), False, 'import sys\n'), ((12640, 12659), 'os.path.join', 'join', (['SAVEPATH', '"""C"""'], {}), "(SAVEPATH, 'C')\n", (12644, 12659), False, 'from os.path import join\n'), ((8978, 8993), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (8990, 8993), True, 'import numpy as np\n'), ((12305, 12317), 'numpy.abs', 'np.abs', (['U[1]'], {}), '(U[1])\n', (12311, 12317), True, 'import numpy as np\n'), ((12746, 12766), 'os.path.join', 'join', (['SAVEPATH', '"""Uz"""'], {}), "(SAVEPATH, 'Uz')\n", (12750, 12766), False, 'from os.path import join\n'), ((12831, 12851), 'os.path.join', 'join', (['SAVEPATH', '"""Ux"""'], {}), "(SAVEPATH, 'Ux')\n", (12835, 12851), False, 'from os.path import join\n'), ((13035, 13054), 'os.path.join', 'join', (['SAVEPATH', '"""C"""'], {}), "(SAVEPATH, 'C')\n", (13039, 
13054), False, 'from os.path import join\n'), ((13118, 13138), 'os.path.join', 'join', (['SAVEPATH', '"""Uz"""'], {}), "(SAVEPATH, 'Uz')\n", (13122, 13138), False, 'from os.path import join\n'), ((13202, 13222), 'os.path.join', 'join', (['SAVEPATH', '"""Ux"""'], {}), "(SAVEPATH, 'Ux')\n", (13206, 13222), False, 'from os.path import join\n')] |
# -*- coding: utf-8 -*-
"""
test_celllab_cts.py: Unit-test function for CellLabCTS.
Created on Thu Jul 9 08:20:06 2015
@author: gtucker
"""
from nose.tools import assert_equal
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid, HexModelGrid
from landlab.ca.celllab_cts import Transition, Event
from landlab.ca.raster_cts import RasterCTS
from landlab.ca.oriented_raster_cts import OrientedRasterCTS
from landlab.ca.hex_cts import HexCTS
from landlab.ca.oriented_hex_cts import OrientedHexCTS
from heapq import heappush
from heapq import heappop
import numpy as np
# For dev
from landlab.ca.celllab_cts import _RUN_NEW
def callback_function(ca, node1, node2, time_now):
"""
This function is passed as an argument to a transition event, and then
called automatically by CellLabCTSModel's do_event() method.
"""
pass
# elapsed_time = time_now - ca.last_update_time[node1]
# if ca.node_state[node1]==1:
# ca.prop_data[ca.propid[node1]]+=100*elapsed_time
# elapsed_time = time_now - ca.last_update_time[node2]
# if ca.node_state[node2]==1:
# ca.prop_data[ca.propid[node2]]+=100*elapsed_time
#
def test_transition():
"""Test instantiation of Transition() object."""
t = Transition((0, 0, 0), (1, 1, 0), 1.0, name='test',
swap_properties=False, prop_update_fn=None)
assert_equal(t.from_state, (0,0,0))
assert_equal(t.to_state, (1,1,0))
assert_equal(t.rate, 1.0)
assert_equal(t.name, 'test')
assert_equal(t.swap_properties, False)
assert_equal(t.prop_update_fn, None)
def test_raster_cts():
"""
Tests instantiation of a RasterCTS and implementation of one transition,
with a callback function.
"""
# Set up a small grid with no events scheduled
mg = RasterModelGrid(4, 4, 1.0)
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
node_state_grid = mg.add_ones('node', 'node_state_map', dtype=int)
node_state_grid[6] = 0
ns_dict = { 0 : 'black', 1 : 'white' }
xn_list = []
xn_list.append( Transition((1,0,0), (0,1,0), 0.1, '', True, callback_function))
pd = mg.add_zeros('node', 'property_data', dtype=int)
pd[5] = 50
ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid, prop_data=pd,
prop_reset_value=0)
# Test the data structures
assert (ca.num_link_states==4), 'wrong number of link states'
assert (ca.prop_data[5]==50), 'error in property data'
assert (ca.num_node_states==2), 'error in num_node_states'
assert (ca.link_orientation[-1]==0), 'error in link orientation array'
assert (ca.link_state_dict[(1, 0, 0)]==2), 'error in link state dict'
assert (ca.n_xn[2]==1), 'error in n_xn'
assert (ca.node_pair[1]==(0, 1, 0)), 'error in cell_pair list'
if _RUN_NEW:
assert (len(ca.priority_queue._queue)==1), 'event queue has wrong size'
assert (ca.next_trn_id.size==24), 'wrong size next_trn_id'
assert (ca.trn_id.shape==(4, 1)), 'wrong size for xn_to'
assert (ca.trn_id[2][0]==0), 'wrong value in xn_to'
assert (ca.trn_to[0] == 1), 'wrong trn_to state'
assert (ca.trn_rate[0] == 0.1), 'wrong trn rate'
assert (ca.trn_propswap[0] == 1), 'wrong trn propswap'
assert (ca.trn_prop_update_fn == callback_function), 'wrong prop upd'
else:
assert (len(ca.event_queue)==1), 'event queue has wrong size'
assert (ca.xn_to.size==4), 'wrong size for xn_to'
assert (ca.xn_to.shape==(4, 1)), 'wrong size for xn_to'
assert (ca.xn_to[2][0]==1), 'wrong value in xn_to'
assert (ca.xn_rate[2][0]==0.1), 'error in transition rate array'
# Manipulate the data in the event queue for testing:
if _RUN_NEW:
# pop the scheduled event off the queue
(event_time, index, event_link) = ca.priority_queue.pop()
assert (ca.priority_queue._queue==[]), \
'event queue should now be empty but is not'
# engineer an event
ca.priority_queue.push(8, 1.0)
ca.next_update[8] = 1.0
ca.next_trn_id[8] = 0
else:
# pop the scheduled event off the queue
ev = heappop(ca.event_queue)
assert (ca.event_queue==[]), 'event queue should now be empty but is not'
# engineer an event
ev.time = 1.0
ev.link = 8
ev.xn_to = 1
ev.propswap = True
ev.prop_update_fn = callback_function
ca.next_update[8] = 1.0
# push it onto the event queue
heappush(ca.event_queue, ev)
# run the CA
ca.run(2.0)
# some more tests.
# Is current time advancing correctly? (should only go to 1.0, not 2.0)
# Did the two nodes (5 and 6) correctly exchange states?
# Did the property ID and data arrays get updated? Note that the "propswap"
# should switch propids between nodes 5 and 6, and the callback function
# should increase the value of prop_data in the "swap" node from 50 to 150.
assert (ca.current_time==1.0), 'current time incorrect'
assert (ca.node_state[5]==0), 'error in node state 5'
assert (ca.node_state[6]==1), 'error in node state 6'
#assert (ca.prop_data[ca.propid[6]]==150), 'error in prop swap'
def test_oriented_raster_cts():
"""Tests instantiation of an OrientedRasterCTS() object"""
mg = RasterModelGrid(3, 3, 1.0)
nsd = {0 : 'oui', 1 : 'non'}
xnlist = []
xnlist.append(Transition((0,1,0), (1,1,0), 1.0, 'hopping'))
nsg = mg.add_zeros('node', 'node_state_grid')
orcts = OrientedRasterCTS(mg, nsd, xnlist, nsg)
assert_equal(orcts.num_link_states, 8)
#assert_array_equal(orcts.link_orientation, [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
assert_array_equal(orcts.link_orientation, [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0])
def test_hex_cts():
"""Tests instantiation of a HexCTS() object"""
mg = HexModelGrid(3, 2, 1.0, orientation='vertical', reorient_links=True)
nsd = {0 : 'zero', 1 : 'one'}
xnlist = []
xnlist.append(Transition((0,1,0), (1,1,0), 1.0, 'transitioning'))
nsg = mg.add_zeros('node', 'node_state_grid')
hcts = HexCTS(mg, nsd, xnlist, nsg)
assert_equal(hcts.num_link_states, 4)
assert_array_equal(hcts.link_orientation, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_oriented_hex_cts():
"""Tests instantiation of an OrientedHexCTS() object"""
mg = HexModelGrid(3, 2, 1.0, orientation='vertical', reorient_links=True)
nsd = {0 : 'zero', 1 : 'one'}
xnlist = []
xnlist.append(Transition((0,1,0), (1,1,0), 1.0, 'transitioning'))
nsg = mg.add_zeros('node', 'node_state_grid')
ohcts = OrientedHexCTS(mg, nsd, xnlist, nsg)
assert_equal(ohcts.num_link_states, 12)
#assert_array_equal(ohcts.link_orientation, [2, 1, 0, 0, 0, 2, 1, 0, 2, 1, 0])
assert_array_equal(ohcts.link_orientation, [2, 0, 1, 0, 2, 0, 1, 0, 2, 0, 1])
def test_priority_queue():
"""Test import and use of priority queue."""
from ..cfuncs import PriorityQueue
# Create a priority queue
pq = PriorityQueue()
# push a bunch of events
pq.push(2, 2.2)
pq.push(5, 5.5)
pq.push(0, 0.11)
pq.push(4, 4.4)
pq.push(1, 1.1)
pq.push(3, 3.3)
# pop a bunch of events
(priority, index, item) = pq.pop()
assert (priority == 0.11), 'incorrect priority in PQ test'
assert (index == 2), 'incorrect index in PQ test'
assert (item == 0), 'incorrect item in PQ test'
(priority, index, item) = pq.pop()
assert (priority == 1.1), 'incorrect priority in PQ test'
assert (index == 4), 'incorrect index in PQ test'
assert (item == 1), 'incorrect item in PQ test'
(priority, index, item) = pq.pop()
assert (priority == 2.2), 'incorrect priority in PQ test'
assert (index == 0), 'incorrect index in PQ test'
assert (item == 2), 'incorrect item in PQ test'
(priority, index, item) = pq.pop()
assert (priority == 3.3), 'incorrect priority in PQ test'
assert (index == 5), 'incorrect index in PQ test'
assert (item == 3), 'incorrect item in PQ test'
(priority, index, item) = pq.pop()
assert (priority == 4.4), 'incorrect priority in PQ test'
assert (index == 3), 'incorrect index in PQ test'
assert (item == 4), 'incorrect item in PQ test'
(priority, index, item) = pq.pop()
assert (priority == 5.5), 'incorrect priority in PQ test'
assert (index == 1), 'incorrect index in PQ test'
assert (item == 5), 'incorrect item in PQ test'
def test_run_oriented_raster():
    """Test running with a small grid, 2 states, 4 transition types."""
    # Create an OrientedRaster with a 3x5 raster grid. Test model has 2 node
    # states and 4 transition types.
    grid = RasterModelGrid((3, 5))
    nsd = {0: 'zero', 1: 'one'}
    trn_list = [
        Transition((0, 1, 0), (1, 0, 0), 1.0),
        Transition((1, 0, 0), (0, 1, 0), 2.0),
        Transition((0, 1, 1), (1, 0, 1), 3.0),
        Transition((0, 1, 1), (1, 1, 1), 4.0),
    ]
    ins = np.arange(15) % 2  # checkerboard initial pattern
    cts = OrientedRasterCTS(grid, nsd, trn_list, ins)

    # Successive (run-to time, expected node state) checkpoints. Transitions
    # occur at ~0.12, ~0.19, ~0.265; those at ~0.276 and ~0.461 are ignored,
    # and the next accepted one is at ~0.648.
    checkpoints = (
        (0.15, [0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0]),
        (0.2,  [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0]),
        (0.27, [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]),
        (0.28, [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]),
        (0.5,  [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]),
        (0.65, [0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0]),
    )
    for run_to, expected in checkpoints:
        cts.run(run_to)
        assert_array_equal(cts.node_state, expected)
def test_grain_hill_model():
    """Run a lattice-grain-based hillslope evolution model."""
    from .grain_hill_as_class import GrainHill

    params = {
        'number_of_node_rows': 10,
        'number_of_node_columns': 10,
        'report_interval': 5.0,
        'run_duration': 10.0,
        'output_interval': 1.0e5,
        'settling_rate': 220000000.0,
        'disturbance_rate': 0.01,
        'uplift_interval': 4000.0,
        'friction_coef': 1.0,
        'plot_interval': 1.0,
        'show_plots': False,
    }
    grid_size = (int(params['number_of_node_rows']),
                 int(params['number_of_node_columns']))

    # First run: uplift interval is far longer than the run duration,
    # so no uplift step occurs.
    model = GrainHill(grid_size, **params)
    model.run()
    assert_array_equal(model.grid.at_node['node_state'][:18],
                       [8, 7, 7, 7, 7, 7, 7, 7, 7, 8, 0, 7, 7, 7, 7, 0, 7, 7])

    # Second run: shorten the uplift interval so uplift steps happen.
    params['uplift_interval'] = 5.0
    params['run_duration'] = 15.0
    model = GrainHill(grid_size, **params)
    model.run()
    assert_array_equal(model.grid.at_node['node_state'][20:38],
                       [0, 7, 7, 7, 7, 0, 7, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 7])
if __name__ == '__main__':
    # Run the full test suite when executed directly (no test runner needed)
    for test_func in (test_transition, test_raster_cts, test_oriented_raster_cts,
                      test_hex_cts, test_oriented_hex_cts,
                      test_run_oriented_raster, test_grain_hill_model):
        test_func()
| [
"landlab.ca.oriented_raster_cts.OrientedRasterCTS",
"landlab.HexModelGrid",
"landlab.RasterModelGrid",
"landlab.ca.celllab_cts.Transition",
"heapq.heappush",
"numpy.testing.assert_array_equal",
"landlab.ca.oriented_hex_cts.OrientedHexCTS",
"nose.tools.assert_equal",
"heapq.heappop",
"numpy.arange"... | [((1258, 1356), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 0, 0)', '(1, 1, 0)', '(1.0)'], {'name': '"""test"""', 'swap_properties': '(False)', 'prop_update_fn': 'None'}), "((0, 0, 0), (1, 1, 0), 1.0, name='test', swap_properties=False,\n prop_update_fn=None)\n", (1268, 1356), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((1376, 1413), 'nose.tools.assert_equal', 'assert_equal', (['t.from_state', '(0, 0, 0)'], {}), '(t.from_state, (0, 0, 0))\n', (1388, 1413), False, 'from nose.tools import assert_equal\n'), ((1416, 1451), 'nose.tools.assert_equal', 'assert_equal', (['t.to_state', '(1, 1, 0)'], {}), '(t.to_state, (1, 1, 0))\n', (1428, 1451), False, 'from nose.tools import assert_equal\n'), ((1454, 1479), 'nose.tools.assert_equal', 'assert_equal', (['t.rate', '(1.0)'], {}), '(t.rate, 1.0)\n', (1466, 1479), False, 'from nose.tools import assert_equal\n'), ((1484, 1512), 'nose.tools.assert_equal', 'assert_equal', (['t.name', '"""test"""'], {}), "(t.name, 'test')\n", (1496, 1512), False, 'from nose.tools import assert_equal\n'), ((1517, 1555), 'nose.tools.assert_equal', 'assert_equal', (['t.swap_properties', '(False)'], {}), '(t.swap_properties, False)\n', (1529, 1555), False, 'from nose.tools import assert_equal\n'), ((1560, 1596), 'nose.tools.assert_equal', 'assert_equal', (['t.prop_update_fn', 'None'], {}), '(t.prop_update_fn, None)\n', (1572, 1596), False, 'from nose.tools import assert_equal\n'), ((1806, 1832), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(4)', '(4)', '(1.0)'], {}), '(4, 4, 1.0)\n', (1821, 1832), False, 'from landlab import RasterModelGrid, HexModelGrid\n'), ((2224, 2310), 'landlab.ca.raster_cts.RasterCTS', 'RasterCTS', (['mg', 'ns_dict', 'xn_list', 'node_state_grid'], {'prop_data': 'pd', 'prop_reset_value': '(0)'}), '(mg, ns_dict, xn_list, node_state_grid, prop_data=pd,\n prop_reset_value=0)\n', (2233, 2310), False, 'from landlab.ca.raster_cts import RasterCTS\n'), ((5382, 5408), 
'landlab.RasterModelGrid', 'RasterModelGrid', (['(3)', '(3)', '(1.0)'], {}), '(3, 3, 1.0)\n', (5397, 5408), False, 'from landlab import RasterModelGrid, HexModelGrid\n'), ((5584, 5623), 'landlab.ca.oriented_raster_cts.OrientedRasterCTS', 'OrientedRasterCTS', (['mg', 'nsd', 'xnlist', 'nsg'], {}), '(mg, nsd, xnlist, nsg)\n', (5601, 5623), False, 'from landlab.ca.oriented_raster_cts import OrientedRasterCTS\n'), ((5629, 5667), 'nose.tools.assert_equal', 'assert_equal', (['orcts.num_link_states', '(8)'], {}), '(orcts.num_link_states, 8)\n', (5641, 5667), False, 'from nose.tools import assert_equal\n'), ((5758, 5843), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['orcts.link_orientation', '[0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0]'], {}), '(orcts.link_orientation, [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0]\n )\n', (5776, 5843), False, 'from numpy.testing import assert_array_equal\n'), ((5921, 5989), 'landlab.HexModelGrid', 'HexModelGrid', (['(3)', '(2)', '(1.0)'], {'orientation': '"""vertical"""', 'reorient_links': '(True)'}), "(3, 2, 1.0, orientation='vertical', reorient_links=True)\n", (5933, 5989), False, 'from landlab import RasterModelGrid, HexModelGrid\n'), ((6171, 6199), 'landlab.ca.hex_cts.HexCTS', 'HexCTS', (['mg', 'nsd', 'xnlist', 'nsg'], {}), '(mg, nsd, xnlist, nsg)\n', (6177, 6199), False, 'from landlab.ca.hex_cts import HexCTS\n'), ((6205, 6242), 'nose.tools.assert_equal', 'assert_equal', (['hcts.num_link_states', '(4)'], {}), '(hcts.num_link_states, 4)\n', (6217, 6242), False, 'from nose.tools import assert_equal\n'), ((6247, 6323), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['hcts.link_orientation', '[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '(hcts.link_orientation, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (6265, 6323), False, 'from numpy.testing import assert_array_equal\n'), ((6424, 6492), 'landlab.HexModelGrid', 'HexModelGrid', (['(3)', '(2)', '(1.0)'], {'orientation': '"""vertical"""', 'reorient_links': '(True)'}), "(3, 2, 1.0, 
orientation='vertical', reorient_links=True)\n", (6436, 6492), False, 'from landlab import RasterModelGrid, HexModelGrid\n'), ((6675, 6711), 'landlab.ca.oriented_hex_cts.OrientedHexCTS', 'OrientedHexCTS', (['mg', 'nsd', 'xnlist', 'nsg'], {}), '(mg, nsd, xnlist, nsg)\n', (6689, 6711), False, 'from landlab.ca.oriented_hex_cts import OrientedHexCTS\n'), ((6721, 6760), 'nose.tools.assert_equal', 'assert_equal', (['ohcts.num_link_states', '(12)'], {}), '(ohcts.num_link_states, 12)\n', (6733, 6760), False, 'from nose.tools import assert_equal\n'), ((6848, 6925), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ohcts.link_orientation', '[2, 0, 1, 0, 2, 0, 1, 0, 2, 0, 1]'], {}), '(ohcts.link_orientation, [2, 0, 1, 0, 2, 0, 1, 0, 2, 0, 1])\n', (6866, 6925), False, 'from numpy.testing import assert_array_equal\n'), ((8774, 8797), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(3, 5)'], {}), '((3, 5))\n', (8789, 8797), False, 'from landlab import RasterModelGrid, HexModelGrid\n'), ((9156, 9199), 'landlab.ca.oriented_raster_cts.OrientedRasterCTS', 'OrientedRasterCTS', (['grid', 'nsd', 'trn_list', 'ins'], {}), '(grid, nsd, trn_list, ins)\n', (9173, 9199), False, 'from landlab.ca.oriented_raster_cts import OrientedRasterCTS\n'), ((9261, 9347), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cts.node_state', '[0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0]'], {}), '(cts.node_state, [0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, \n 1, 0])\n', (9279, 9347), False, 'from numpy.testing import assert_array_equal\n'), ((9426, 9512), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cts.node_state', '[0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0]'], {}), '(cts.node_state, [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, \n 1, 0])\n', (9444, 9512), False, 'from numpy.testing import assert_array_equal\n'), ((9593, 9679), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cts.node_state', '[0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]'], {}), '(cts.node_state, 
[0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, \n 1, 0])\n', (9611, 9679), False, 'from numpy.testing import assert_array_equal\n'), ((9784, 9870), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cts.node_state', '[0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]'], {}), '(cts.node_state, [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, \n 1, 0])\n', (9802, 9870), False, 'from numpy.testing import assert_array_equal\n'), ((9960, 10046), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cts.node_state', '[0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]'], {}), '(cts.node_state, [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, \n 1, 0])\n', (9978, 10046), False, 'from numpy.testing import assert_array_equal\n'), ((10127, 10213), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cts.node_state', '[0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0]'], {}), '(cts.node_state, [0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, \n 1, 0])\n', (10145, 10213), False, 'from numpy.testing import assert_array_equal\n'), ((10986, 11114), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["grain_hill_model.grid.at_node['node_state'][:18]", '[8, 7, 7, 7, 7, 7, 7, 7, 7, 8, 0, 7, 7, 7, 7, 0, 7, 7]'], {}), "(grain_hill_model.grid.at_node['node_state'][:18], [8, 7,\n 7, 7, 7, 7, 7, 7, 7, 8, 0, 7, 7, 7, 7, 0, 7, 7])\n", (11004, 11114), False, 'from numpy.testing import assert_array_equal\n'), ((11332, 11463), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["grain_hill_model.grid.at_node['node_state'][20:38]", '[0, 7, 7, 7, 7, 0, 7, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 7]'], {}), "(grain_hill_model.grid.at_node['node_state'][20:38], [0, \n 7, 7, 7, 7, 0, 7, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 7])\n", (11350, 11463), False, 'from numpy.testing import assert_array_equal\n'), ((2078, 2144), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(1, 0, 0)', '(0, 1, 0)', '(0.1)', '""""""', '(True)', 'callback_function'], {}), "((1, 0, 0), (0, 1, 0), 0.1, '', True, callback_function)\n", (2088, 
2144), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((4212, 4235), 'heapq.heappop', 'heappop', (['ca.event_queue'], {}), '(ca.event_queue)\n', (4219, 4235), False, 'from heapq import heappop\n'), ((4571, 4599), 'heapq.heappush', 'heappush', (['ca.event_queue', 'ev'], {}), '(ca.event_queue, ev)\n', (4579, 4599), False, 'from heapq import heappush\n'), ((5476, 5524), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 1, 0)', '(1, 1, 0)', '(1.0)', '"""hopping"""'], {}), "((0, 1, 0), (1, 1, 0), 1.0, 'hopping')\n", (5486, 5524), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((6058, 6112), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 1, 0)', '(1, 1, 0)', '(1.0)', '"""transitioning"""'], {}), "((0, 1, 0), (1, 1, 0), 1.0, 'transitioning')\n", (6068, 6112), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((6561, 6615), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 1, 0)', '(1, 1, 0)', '(1.0)', '"""transitioning"""'], {}), "((0, 1, 0), (1, 1, 0), 1.0, 'transitioning')\n", (6571, 6615), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((8870, 8907), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 1, 0)', '(1, 0, 0)', '(1.0)'], {}), '((0, 1, 0), (1, 0, 0), 1.0)\n', (8880, 8907), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((8929, 8966), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(1, 0, 0)', '(0, 1, 0)', '(2.0)'], {}), '((1, 0, 0), (0, 1, 0), 2.0)\n', (8939, 8966), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((8988, 9025), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 1, 1)', '(1, 0, 1)', '(3.0)'], {}), '((0, 1, 1), (1, 0, 1), 3.0)\n', (8998, 9025), False, 'from landlab.ca.celllab_cts import Transition, Event\n'), ((9047, 9084), 'landlab.ca.celllab_cts.Transition', 'Transition', (['(0, 1, 1)', '(1, 1, 1)', '(4.0)'], {}), '((0, 1, 1), (1, 1, 1), 4.0)\n', (9057, 9084), False, 'from 
landlab.ca.celllab_cts import Transition, Event\n'), ((9096, 9109), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (9105, 9109), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# File: maputils.py
# Copyright 2021 Dr. <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions related to mapping tasks
"""
import functools
import itertools
from types import TracebackType
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
from tabulate import tabulate
from termcolor import colored
from ..utils.detection_types import DP, BaseExceptionType, S, T
from ..utils.logger import log_once, logger
__all__ = ["MappingContextManager", "DefaultMapper", "maybe_get_fake_score", "LabelSummarizer", "curry"]
class MappingContextManager:
    """
    A context for logging and catching some exceptions inside a mapping function.
    After the context exits, ``context_error`` tells the caller whether an
    exception was raised inside the block.
    """

    def __init__(self, dp_name: Optional[str] = None) -> None:
        """
        :param dp_name: A name for the datapoint to be mapped
        """
        self.dp_name = "" if dp_name is None else dp_name
        # Stays True unless the block completes without any exception
        self.context_error = True

    def __enter__(self) -> "MappingContextManager":
        """
        context enter
        """
        return self

    def __exit__(
        self,
        exc_type: Optional[BaseExceptionType],
        exc_val: Optional[BaseExceptionType],
        exc_tb: Optional[TracebackType],
    ) -> Optional[bool]:
        """
        context exit. KeyError/ValueError/IndexError/AssertionError are logged
        and suppressed; any other exception propagates.
        """
        if exc_tb is not None and exc_type in (KeyError, ValueError, IndexError, AssertionError):
            log_once(
                f"""dp: {self.dp_name}, err: {type(exc_val).__name__},
                 msg: {str(exc_val)} in: {str(exc_tb.tb_frame)} will be filtered"""
            )
            return True
        if exc_type is None:
            self.context_error = False
        return None
class DefaultMapper:  # pylint: disable=R0903
    """
    Wraps a mapping function together with pre-defined arguments. Calling the
    wrapper with a datapoint invokes the wrapped function with that datapoint
    followed by the stored args/kwargs.

    https://stackoverflow.com/questions/36314/what-is-currying
    """

    def __init__(self, func: Callable[[DP, S], T], *args: Any, **kwargs: Any) -> None:
        """
        :param func: A mapping function
        :param args: Default args to pass to the function
        :param kwargs: Default kwargs to pass to the function
        """
        self.argument_kwargs = kwargs
        self.argument_args = args
        self.func = func

    def __call__(self, dp: Any) -> Any:
        """
        Invoke the wrapped function on one datapoint.

        :param dp: datapoint within a dataflow
        :return: The return value of the invoked function with default arguments.
        """
        return self.func(dp, *self.argument_args, **self.argument_kwargs)
def curry(func: Callable[..., T]) -> Callable[..., Callable[[DP], T]]:
    """
    Decorator converting a function that maps

        dps: Union[JsonDict,Image] -> Union[JsonDict,Image]

    into a :class:`DefaultMapper` factory: call it once with every argument
    except the datapoint, and call the returned mapper later with only the
    datapoint. Useful when plugging the function into a dataflow.

    **Example:**

        .. code-block:: python

            @curry
            def json_to_image(dp, config_arg_1, config_arg_2,...) -> Image:
                ...

        can be applied like:

        .. code-block:: python

            df = ...
            df = MapData(df,json_to_image(config_arg_1=val_1,config_arg_2=val_2))

    :param func: A callable [[:class:`Image`],[Any]] -> [:class:`Image`]
    :return: A DefaultMapper
    """

    @functools.wraps(func)
    def deferred(*call_args: Any, **call_kwargs: Any) -> DefaultMapper:
        return DefaultMapper(func, *call_args, **call_kwargs)

    return deferred
def maybe_get_fake_score(add_fake_score: bool) -> Optional[float]:
    """
    Return a fake score drawn uniformly from (0,1) when requested, else None.

    :param add_fake_score: if True, generate the fake score
    :return: A uniform random variable in (0,1), or None
    """
    if not add_fake_score:
        return None
    return np.random.uniform(0.0, 1.0, 1)[0]
class LabelSummarizer:
    """
    A class for generating label statistics. Useful, when mapping and generating a SummaryAnnotation.

    .. code-block:: python

        summarizer = LabelSummarizer({"1": "label_1","2":"label_2"})

        for dp in some_dataflow:
            summarizer.dump(dp["label_id"])

        summarizer.print_summary_histogram()
    """

    def __init__(self, categories: Dict[str, str]) -> None:
        """
        :param categories: A dict of categories as given as in categories.get_categories().
        """
        self.categories = categories
        cat_numbers = len(self.categories.keys())
        # One histogram bin per (1-based) category id: edges 1, 2, ..., n+1
        self.hist_bins = np.arange(1, cat_numbers + 2)
        # Running count per category, aligned with the bins above
        self.summary = np.zeros(cat_numbers)

    def dump(self, item: Union[List[Union[str, int]], str, int]) -> None:
        """
        Dump a category number (or list of category numbers) into the running
        histogram.

        :param item: A category number.
        """
        # Bug fix: dtype was "int8", which silently overflows for category
        # ids > 127 and puts their counts into the wrong (or no) bin.
        np_item = np.asarray(item, dtype="int64")
        self.summary += np.histogram(np_item, bins=self.hist_bins)[0]

    def get_summary(self) -> Dict[str, np.int32]:
        """
        Get a dictionary with category ids and the number dumped
        """
        return dict(list(zip(self.categories.keys(), self.summary.astype(np.int32))))

    def print_summary_histogram(self) -> None:
        """
        Prints a summary from all dumps.
        """
        # NOTE(review): the last category is excluded from the table
        # (self.summary[:-1]) — presumably a background/placeholder class;
        # confirm against the categories definition.
        data = list(itertools.chain(*[[self.categories[str(i + 1)], v] for i, v in enumerate(self.summary[:-1])]))
        num_columns = min(6, len(data))
        total_img_anns = sum(data[1::2])
        # Pad so the flat list folds evenly into num_columns columns
        data.extend([None] * ((num_columns - len(data) % num_columns) % num_columns))
        data.extend(["total", total_img_anns])
        data = itertools.zip_longest(*[data[i::num_columns] for i in range(num_columns)])  # type: ignore
        table = tabulate(
            data, headers=["category", "#box"] * (num_columns // 2), tablefmt="pipe", stralign="center", numalign="left"
        )
        logger.info("Ground-Truth category distribution:\n %s", colored(table, "cyan"))
| [
"numpy.random.uniform",
"numpy.asarray",
"numpy.zeros",
"termcolor.colored",
"numpy.histogram",
"tabulate.tabulate",
"numpy.arange",
"functools.wraps"
] | [((4073, 4094), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (4088, 4094), False, 'import functools\n'), ((5213, 5242), 'numpy.arange', 'np.arange', (['(1)', '(cat_numbers + 2)'], {}), '(1, cat_numbers + 2)\n', (5222, 5242), True, 'import numpy as np\n'), ((5266, 5287), 'numpy.zeros', 'np.zeros', (['cat_numbers'], {}), '(cat_numbers)\n', (5274, 5287), True, 'import numpy as np\n'), ((5477, 5507), 'numpy.asarray', 'np.asarray', (['item'], {'dtype': '"""int8"""'}), "(item, dtype='int8')\n", (5487, 5507), True, 'import numpy as np\n'), ((6368, 6491), 'tabulate.tabulate', 'tabulate', (['data'], {'headers': "(['category', '#box'] * (num_columns // 2))", 'tablefmt': '"""pipe"""', 'stralign': '"""center"""', 'numalign': '"""left"""'}), "(data, headers=['category', '#box'] * (num_columns // 2), tablefmt=\n 'pipe', stralign='center', numalign='left')\n", (6376, 6491), False, 'from tabulate import tabulate\n'), ((4508, 4538), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(1)'], {}), '(0.0, 1.0, 1)\n', (4525, 4538), True, 'import numpy as np\n'), ((5532, 5574), 'numpy.histogram', 'np.histogram', (['np_item'], {'bins': 'self.hist_bins'}), '(np_item, bins=self.hist_bins)\n', (5544, 5574), True, 'import numpy as np\n'), ((6573, 6595), 'termcolor.colored', 'colored', (['table', '"""cyan"""'], {}), "(table, 'cyan')\n", (6580, 6595), False, 'from termcolor import colored\n')] |
__author__ = '<NAME>'
import numpy as np
from matplotlib import pyplot as plt
def PlotData(Data, size=50, showGr=1, newWin=1, titleT=None):
    """Scatter-plot a labelled 2-D dataset, one color per class label.

    Args:
        Data: array of shape (n, 3); column 0 is the class label (0, 1 or 2),
            columns 1 and 2 are the plotted coordinates.
        size: marker size forwarded to plt.scatter.
        showGr: if truthy, call plt.show() after plotting.
        newWin: if truthy, open a new figure first.
        titleT: optional plot title.
    """
    if newWin:
        plt.figure()
    # Fix: compare with None using 'is not', not '!='
    if titleT is not None:
        plt.title(titleT)
    # One scatter call per class, preserving the red/green/blue mapping
    for label, color in ((0, 'red'), (1, 'green'), (2, 'blue')):
        mask = Data[:, 0] == label
        plt.scatter(Data[:, 1][mask], Data[:, 2][mask], color=color, s=size)
    if showGr:
        plt.show()
def myKNN(k, Data, NewSamples):
    """Label each row of NewSamples with a k-nearest-neighbour majority vote.

    Data rows are (label, feature_1, feature_2, ...); NewSamples rows share
    that layout and get their label column (column 0) overwritten in place.
    Ties between equally frequent labels are broken in favour of the label
    whose voting neighbours have the smallest summed distance.
    Returns NewSamples (modified in place).
    """
    # Clamp k into [1, number of training points]
    k = max(1, min(k, Data.shape[0]))
    for row in range(NewSamples.shape[0]):
        # Euclidean distance from this sample to every training point
        dists = np.sum((Data[:, 1:] - NewSamples[row, 1:]) ** 2, axis=1) ** 0.5
        nearest = np.argsort(dists)[0:k]
        labels, votes = np.unique(Data[nearest, 0], return_counts=True)
        top_votes = np.max(votes)
        tied = labels[votes == top_votes].ravel()
        if len(tied) > 1:
            # Tie-break by total distance of each tied label's neighbours
            totals = np.array([np.sum(dists[nearest][Data[nearest, 0] == lab])
                               for lab in tied])
            NewSamples[row, 0] = tied[np.argmin(totals)]
        else:
            NewSamples[row, 0] = labels[np.argmax(votes)]
    return NewSamples
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.argmax",
"matplotlib.pyplot.scatter",
"numpy.argmin",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.unique"
] | [((253, 348), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Data[:, 1][Data[:, 0] == 0]', 'Data[:, 2][Data[:, 0] == 0]'], {'color': '"""red"""', 's': 'size'}), "(Data[:, 1][Data[:, 0] == 0], Data[:, 2][Data[:, 0] == 0], color\n ='red', s=size)\n", (264, 348), True, 'from matplotlib import pyplot as plt\n'), ((343, 440), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Data[:, 1][Data[:, 0] == 1]', 'Data[:, 2][Data[:, 0] == 1]'], {'color': '"""green"""', 's': 'size'}), "(Data[:, 1][Data[:, 0] == 1], Data[:, 2][Data[:, 0] == 1], color\n ='green', s=size)\n", (354, 440), True, 'from matplotlib import pyplot as plt\n'), ((435, 531), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Data[:, 1][Data[:, 0] == 2]', 'Data[:, 2][Data[:, 0] == 2]'], {'color': '"""blue"""', 's': 'size'}), "(Data[:, 1][Data[:, 0] == 2], Data[:, 2][Data[:, 0] == 2], color\n ='blue', s=size)\n", (446, 531), True, 'from matplotlib import pyplot as plt\n'), ((180, 192), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (190, 192), True, 'from matplotlib import pyplot as plt\n'), ((228, 245), 'matplotlib.pyplot.title', 'plt.title', (['titleT'], {}), '(titleT)\n', (237, 245), True, 'from matplotlib import pyplot as plt\n'), ((546, 556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (554, 556), True, 'from matplotlib import pyplot as plt\n'), ((825, 844), 'numpy.argsort', 'np.argsort', (['distVec'], {}), '(distVec)\n', (835, 844), True, 'import numpy as np\n'), ((909, 957), 'numpy.unique', 'np.unique', (['Data[kNearest, 0]'], {'return_counts': '(True)'}), '(Data[kNearest, 0], return_counts=True)\n', (918, 957), True, 'import numpy as np\n'), ((977, 991), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (983, 991), True, 'import numpy as np\n'), ((745, 799), 'numpy.sum', 'np.sum', (['((Data[:, 1:] - NewSamples[i, 1:]) ** 2)'], {'axis': '(1)'}), '((Data[:, 1:] - NewSamples[i, 1:]) ** 2, axis=1)\n', (751, 799), True, 'import numpy as np\n'), ((1213, 1270), 'numpy.sum', 'np.sum', 
(['distVec[kNearest][Data[kNearest, 0] == labArr[j]]'], {}), '(distVec[kNearest][Data[kNearest, 0] == labArr[j]])\n', (1219, 1270), True, 'import numpy as np\n'), ((1304, 1322), 'numpy.argmin', 'np.argmin', (['distToL'], {}), '(distToL)\n', (1313, 1322), True, 'import numpy as np\n'), ((1374, 1391), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (1383, 1391), True, 'import numpy as np\n')] |
import numpy as np

x = 1.0
y = 2.0

# exponents and logarithms
print(np.exp(x))    # e^x
print(np.log(x))    # ln x
print(np.log10(x))  # log_10 x
print(np.log2(x))   # log_2 x

# absolute value and elementwise min/max
print(np.fabs(x))
print(np.fmin(x, y))
print(np.fmax(x, y))

# fill an array with n angles spanning [0, 2*pi]
n = 100
z = np.arange(n, dtype=float)
z *= 2.0 * np.pi / float(n - 1)
sin_z = np.sin(z)

# linear interpolation of sin at 0.75 versus the exact value
print(np.interp(0.75, z, sin_z))
print(np.sin(0.75))
"numpy.fmin",
"numpy.fmax",
"numpy.log",
"numpy.log2",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"numpy.fabs",
"numpy.interp",
"numpy.log10"
] | [((263, 288), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (272, 288), True, 'import numpy as np\n'), ((318, 327), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (324, 327), True, 'import numpy as np\n'), ((65, 74), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (71, 74), True, 'import numpy as np\n'), ((88, 97), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (94, 97), True, 'import numpy as np\n'), ((112, 123), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (120, 123), True, 'import numpy as np\n'), ((142, 152), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (149, 152), True, 'import numpy as np\n'), ((185, 195), 'numpy.fabs', 'np.fabs', (['x'], {}), '(x)\n', (192, 195), True, 'import numpy as np\n'), ((203, 216), 'numpy.fmin', 'np.fmin', (['x', 'y'], {}), '(x, y)\n', (210, 216), True, 'import numpy as np\n'), ((223, 236), 'numpy.fmax', 'np.fmax', (['x', 'y'], {}), '(x, y)\n', (230, 236), True, 'import numpy as np\n'), ((350, 375), 'numpy.interp', 'np.interp', (['(0.75)', 'z', 'sin_z'], {}), '(0.75, z, sin_z)\n', (359, 375), True, 'import numpy as np\n'), ((381, 393), 'numpy.sin', 'np.sin', (['(0.75)'], {}), '(0.75)\n', (387, 393), True, 'import numpy as np\n')] |
"""
Data preparation script for GNN tracking.
This script processes h5 files of the ntuple and produces graph data on disk.
Will also save a csv file of the time to build each graph
The differences from Savannah's code are:
- CMS geometry instead of TrackML geometry (main difference is just the layers that connect to each other and their numbering)
- Intersecting lines capability removed (should be added back in)
"""
# System
import os
import sys
import time
import argparse
import logging
import multiprocessing as mp
from functools import partial
sys.path.append("../")
# Externals
import yaml
import pickle
import numpy as np
import pandas as pd
import csv
# Locals
from collections import namedtuple
Graph = namedtuple('Graph', ['x', 'edge_attr', 'edge_index', 'y', 'pid', 'pt', 'eta'])

# Build the list of accepted layer-to-layer transitions (candidate edges).
# Barrel layers 1-4: consecutive pairs (1,2), (2,3), (3,4)
l = np.arange(1, 5)
layer_pairs = np.column_stack((l[:-1], l[1:]))
n_det_layers = 18
# Left-side endcap, layers 5-16: (5,6) ... (15,16)
EC_L = np.arange(5, 17)
EC_L_pairs = np.column_stack((EC_L[:-1], EC_L[1:]))
layer_pairs = np.vstack((layer_pairs, EC_L_pairs))
# Right-side endcap, layers 17-28: (17,18) ... (27,28)
EC_R = np.arange(17, 29)
EC_R_pairs = np.column_stack((EC_R[:-1], EC_R[1:]))
layer_pairs = np.vstack((layer_pairs, EC_R_pairs))
# Any barrel layer may also connect to the innermost endcap layer on each side
barrel_EC_L_pairs = np.array([(b, 5) for b in range(1, 5)])
barrel_EC_R_pairs = np.array([(b, 17) for b in range(1, 5)])
layer_pairs = np.vstack((layer_pairs, barrel_EC_L_pairs, barrel_EC_R_pairs))
def parse_args():
    """Parse command line arguments for the graph-preparation script."""
    parser = argparse.ArgumentParser('prepare.py')
    parser.add_argument('config', nargs='?', default='configs/geometric.yaml')
    parser.add_argument('--n-workers', type=int, default=1)
    parser.add_argument('--task', type=int, default=0)
    parser.add_argument('--n-tasks', type=int, default=1)
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--show-config', action='store_true')
    parser.add_argument('--interactive', action='store_true')
    parser.add_argument('--start-evt', type=int, default=1000)
    parser.add_argument('--end-evt', type=int, default=3000)
    return parser.parse_args()
# Construct the graph
def calc_dphi(phi1, phi2):
    """Return phi2 - phi1 wrapped into the range [-pi, pi].

    Operates elementwise on array-like inputs; values just outside one full
    turn are shifted back by 2*pi in the appropriate direction.
    """
    wrapped = phi2 - phi1
    wrapped[wrapped > np.pi] -= 2 * np.pi
    wrapped[wrapped < -np.pi] += 2 * np.pi
    return wrapped
def calc_eta(r, z):
    """Pseudorapidity eta = -ln(tan(theta/2)) for polar angle theta = atan2(r, z)."""
    polar = np.arctan2(r, z)
    return -1. * np.log(np.tan(polar / 2.))
def select_segments(hits1, hits2, phi_slope_max, z0_max,
                    layer1, layer2):
    """
    Constructs a list of selected segments from the pairings
    between hits1 and hits2, filtered with the specified
    phi slope and z0 criteria.

    Returns: pd DataFrame of (index_1, index_2), corresponding to the
    DataFrame hit label-indices in hits1 and hits2, respectively,
    plus the per-segment dr, dphi, dz and dR Series.
    """
    # All hit pairings within the same event (cross join keyed on 'evt');
    # reset_index keeps the original hit labels as index_1 / index_2 columns.
    pairs = hits1.reset_index().merge(hits2.reset_index(), on='evt',
                                      suffixes=('_1', '_2'))

    # Geometric differences of each candidate segment
    dphi = calc_dphi(pairs.phi_1, pairs.phi_2)
    dz = pairs.z_2 - pairs.z_1
    dr = pairs.r_2 - pairs.r_1
    deta = calc_eta(pairs.r_2, pairs.z_2) - calc_eta(pairs.r_1, pairs.z_1)
    dR = np.sqrt(deta ** 2 + dphi ** 2)

    # Straight line through the two hits: slope in (phi, r) and z intercept
    phi_slope = dphi / dr
    z0 = pairs.z_1 - pairs.r_1 * dz / dr

    # Keep only segments passing both selection criteria
    keep = ((phi_slope.abs() < phi_slope_max) &
            (z0.abs() < z0_max))
    return pairs[keep], dr[keep], dphi[keep], dz[keep], dR[keep]
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max,
                    feature_names, feature_scale, evt="-1"):
    """Construct one graph (i.e. from one event).

    :param hits: DataFrame of hits for a single event; must provide the
        columns in ``feature_names`` plus ``layer_id``, ``particle_id``,
        ``sim_pt`` and ``sim_eta``.
    :param layer_pairs: iterable of (layer1, layer2) ids between which
        candidate edges (segments) may be formed.
    :param phi_slope_max: maximum |dphi/dr| for a segment to be kept.
    :param z0_max: maximum |z0| (extrapolated z intercept) for a segment.
    :param feature_names: hit columns used as node features.
    :param feature_scale: per-feature divisor applied to node features.
    :param evt: event identifier (logging only).
    :return: a ``Graph`` namedtuple containing
        - x: scaled node features, one row per hit
        - edge_attr: (4, n_edges) array of scaled (dr, dphi, dz, dR)
        - edge_index: (2, n_edges) array of node indices (segment start/end)
        - y: list of edge labels, 1 if both hits share a particle id
        - pid, pt, eta: per-hit particle id, sim pt and sim eta
    """
    t0 = time.time()
    # Loop over layer pairs and construct segments
    segments = []
    seg_dr, seg_dphi, seg_dz, seg_dR = [], [], [], []
    # for all accepted layer combinations construct segments
    for (layer1, layer2) in layer_pairs:
        # Find and join all hit pairs for one combo of layers at a time
        try:
            hits1 = hits[hits['layer_id']==layer1]
            hits2 = hits[hits['layer_id']==layer2]
        # If an event has no hits on a layer, we get a KeyError.
        # In that case we just skip to the next layer pair
        except KeyError as e:
            logging.info('skipping empty layer: %s' % e)
            continue
        # Construct the segments
        selected, dr, dphi, dz, dR = select_segments(hits1, hits2, phi_slope_max, z0_max,
                                                     layer1, layer2)
        segments.append(selected)
        seg_dr.append(dr)
        seg_dphi.append(dphi)
        seg_dz.append(dz)
        seg_dR.append(dR)
    # Combine segments from all layer pairs.
    # segments holds, per candidate edge, the hit-frame labels of the two
    # hits that may be connected (index_1, index_2)
    segments = pd.concat(segments)
    seg_dr, seg_dphi = pd.concat(seg_dr), pd.concat(seg_dphi)
    seg_dz, seg_dR = pd.concat(seg_dz), pd.concat(seg_dR)
    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = segments.shape[0]
    # node information: scaled hit features
    X = (hits[feature_names].values / feature_scale).astype(np.float32)
    # edge information: first three attributes share the node feature scale
    edge_attr = np.stack((seg_dr/feature_scale[0],
                          seg_dphi/feature_scale[1],
                          seg_dz/feature_scale[2],
                          seg_dR))
    # initialise as zeros
    # NOTE(review): this float32 array is discarded below, where y is
    # re-bound to a plain Python list of ints
    y = np.zeros(n_edges, dtype=np.float32)
    # pytorch expects node indices numbered from 0 to n_hits-1;
    # right now segments are labelled by the hit id, so we need to convert
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    # positional (0-based) node index of each segment's start and end hit
    seg_start = hit_idx.loc[segments.index_1].values
    seg_end = hit_idx.loc[segments.index_2].values
    # connect starts and ends
    edge_index = np.stack((seg_start, seg_end))
    pid = hits.particle_id
    pt = hits.sim_pt
    eta = hits.sim_eta
    # label is 1 if both segment hits belong to the same particle, else 0
    y = [int (x) for x in segments.particle_id_1 == segments.particle_id_2]
    print("... completed in {0} seconds".format(time.time()-t0))
    return Graph(X, edge_attr, edge_index, y, pid, pt, eta)
def select_hits(hits, pt_min=0):
    """Return the subset of hits above the pt cut, with duplicates removed.

    Hits with ``sim_pt <= pt_min`` are discarded.  Rows sharing the same
    particle id, layer and (x, y, z) position are treated as duplicates and
    only the first occurrence is kept.  When a particle still has several
    hits on one layer, the hit with the smallest ``sim_dxy_sig`` is selected
    (note: for identical sim_dxy_sig values the first row wins; picking e.g.
    the minimum-r hit instead may be preferable).
    """
    # Apply the transverse-momentum cut.
    above_cut = hits[hits['sim_pt'] > pt_min]
    # Collapse exact duplicates: same particle, same layer, same position.
    deduped = above_cut.drop_duplicates(
        subset=['particle_id', 'layer_id', 'x', 'y', 'z'])
    # Keep one hit per (particle, layer): the one with minimal sim_dxy_sig.
    best_idx = deduped.groupby(['particle_id', 'layer_id']).sim_dxy_sig.idxmin()
    return deduped.loc[best_idx.values]
def process_event(file, output_dir, pt_min, eta_range, phi_range, phi_slope_max, z0_max):
    """Build the hit graph for a single event file and save it to disk.

    Parameters
    ----------
    file : str
        Path to the input HDF5 ntuple; the event number is parsed from the
        file name, which must look like 'ntuple_PU200_event<N>.h5'.
    output_dir : str
        Directory the graph archive is written to.
    pt_min : float
        Minimum sim_pt for a hit to be kept.
    eta_range, phi_range : tuple
        Accepted for interface compatibility; not used by this implementation.
    phi_slope_max, z0_max : float
        Segment-selection cuts forwarded to construct_graph.

    Returns
    -------
    None.  The graph is saved as '<output_dir>/event<N>_g000.npz'.
    """
    # Load the data and keep only the columns the graph builder needs.
    data = pd.read_hdf(file)
    data = data[['evt', 'hit_id', 'x', 'y', 'z', 'r', 'sim_pt', 'sim_eta',
                 'sim_phi', 'particle_id', 'volume_id', 'layer_id', 'sim_dxy_sig']]
    # Azimuthal angle of each hit.
    data['phi'] = np.arctan2(data.y, data.x)
    # Extract the event number from the file name.
    evt = int(file.split("ntuple_PU200_event")[1].split('.h5')[0].strip())
    logging.info('Event %i, loading data', evt)
    # Apply the pt cut and remove duplicate hits.
    logging.info('Event %i, selecting hits', evt)
    hits = select_hits(data, pt_min=pt_min)
    # Node features and their normalisation scales.
    feature_names = ['r', 'phi', 'z']
    feature_scale = np.array([1000., np.pi, 1000.])
    logging.info('Event %i, constructing graphs', evt)
    graph = construct_graph(hits, layer_pairs=layer_pairs,
                            phi_slope_max=phi_slope_max, z0_max=z0_max,
                            feature_names=feature_names,
                            feature_scale=feature_scale,
                            evt=evt)
    # Write the graph to the output directory.  BUGFIX: the original wrapped
    # os.path.join in a try/except that swallowed any error and then used the
    # undefined 'filename', which would raise NameError; compute it directly.
    filename = os.path.join(output_dir, 'event%s_g000' % evt)
    logging.info('Event %i, writing graphs', evt)
    np.savez(filename, x=graph.x, edge_attr=graph.edge_attr,
             edge_index=graph.edge_index,
             y=graph.y, pid=graph.pid, pt=graph.pt, eta=graph.eta)
def main():
    """Parse arguments, build graphs for all input files, record timing.

    Reads a YAML config (input/output directories and selection cuts), runs
    process_event over all files in the input directory with a worker pool,
    and appends a timing summary row to 'graph_building_timing.csv'.
    """
    # Parse the command line
    args = parse_args()
    # Setup logging
    log_format = '%(asctime)s %(levelname)s %(message)s'
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format=log_format)
    logging.info('Initializing')
    if args.show_config:
        logging.info('Command line config: %s', args)
    # Load configuration
    with open(args.config) as f:
        config = yaml.load(f, yaml.FullLoader)
    if args.task == 0:
        logging.info('Configuration: %s', config)
    # Find the input files
    input_dir = config['input_dir']
    all_files = [os.path.join(input_dir, file) for file in os.listdir(input_dir)]
    # Prepare the output directory
    output_dir = os.path.expandvars(config['output_dir'])
    os.makedirs(output_dir, exist_ok=True)
    logging.info('Writing outputs to ' + output_dir)
    # Process input files with a worker pool
    t0 = time.time()
    with mp.Pool(processes=args.n_workers) as pool:
        process_func = partial(process_event, output_dir=output_dir,
                               phi_range=(-np.pi, np.pi), **config['selection'])
        pool.map(process_func, all_files)
    t1 = time.time()
    print("Finished in", t1 - t0, "seconds")
    # Append timing results to a CSV file.
    column_names = ['pt_min', 'z0_max', 'phi_max', 'total time', 'number of events', 'mean time per event']
    n_events = len(all_files)
    times = {'pt_min': config['selection']['pt_min'],
             'z0_max': config['selection']['z0_max'],
             'phi_max': config['selection']['phi_slope_max'],
             'total time': t1 - t0,
             'number of events': n_events,
             # Guard against an empty input directory (ZeroDivisionError).
             'mean time per event': (t1 - t0) / n_events if n_events else 0.0}
    timing_file = 'graph_building_timing.csv'
    # BUGFIX: check for the file BEFORE opening it.  open(..., 'a') creates
    # the file, so testing os.path.isfile afterwards (as the original code
    # did) is always True and the header was never written.
    need_header = not os.path.isfile(timing_file)
    with open(timing_file, 'a') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=column_names)
        if need_header:
            writer.writeheader()  # file didn't exist yet, write a header
        writer.writerow(times)
    # Drop to IPython interactive shell on request
    if args.interactive:
        logging.info('Starting IPython interactive session')
        import IPython
        IPython.embed()
    logging.info('All done!')
# Script entry point: run the full graph-building pipeline.
if __name__ == '__main__':
    main()
| [
"yaml.load",
"numpy.arctan2",
"argparse.ArgumentParser",
"os.path.isfile",
"numpy.arange",
"os.path.join",
"csv.DictWriter",
"sys.path.append",
"pandas.read_hdf",
"IPython.embed",
"numpy.tan",
"pandas.concat",
"numpy.stack",
"functools.partial",
"os.path.expandvars",
"multiprocessing.P... | [((569, 591), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (584, 591), False, 'import sys\n'), ((737, 815), 'collections.namedtuple', 'namedtuple', (['"""Graph"""', "['x', 'edge_attr', 'edge_index', 'y', 'pid', 'pt', 'eta']"], {}), "('Graph', ['x', 'edge_attr', 'edge_index', 'y', 'pid', 'pt', 'eta'])\n", (747, 815), False, 'from collections import namedtuple\n'), ((925, 940), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (934, 940), True, 'import numpy as np\n'), ((995, 1028), 'numpy.stack', 'np.stack', (['[l[:-1], l[1:]]'], {'axis': '(1)'}), '([l[:-1], l[1:]], axis=1)\n', (1003, 1028), True, 'import numpy as np\n'), ((1102, 1118), 'numpy.arange', 'np.arange', (['(5)', '(17)'], {}), '(5, 17)\n', (1111, 1118), True, 'import numpy as np\n'), ((1132, 1171), 'numpy.stack', 'np.stack', (['[EC_L[:-1], EC_L[1:]]'], {'axis': '(1)'}), '([EC_L[:-1], EC_L[1:]], axis=1)\n', (1140, 1171), True, 'import numpy as np\n'), ((1186, 1235), 'numpy.concatenate', 'np.concatenate', (['(layer_pairs, EC_L_pairs)'], {'axis': '(0)'}), '((layer_pairs, EC_L_pairs), axis=0)\n', (1200, 1235), True, 'import numpy as np\n'), ((1264, 1281), 'numpy.arange', 'np.arange', (['(17)', '(29)'], {}), '(17, 29)\n', (1273, 1281), True, 'import numpy as np\n'), ((1295, 1334), 'numpy.stack', 'np.stack', (['[EC_R[:-1], EC_R[1:]]'], {'axis': '(1)'}), '([EC_R[:-1], EC_R[1:]], axis=1)\n', (1303, 1334), True, 'import numpy as np\n'), ((1349, 1398), 'numpy.concatenate', 'np.concatenate', (['(layer_pairs, EC_R_pairs)'], {'axis': '(0)'}), '((layer_pairs, EC_R_pairs), axis=0)\n', (1363, 1398), True, 'import numpy as np\n'), ((1498, 1540), 'numpy.array', 'np.array', (['[(1, 5), (2, 5), (3, 5), (4, 5)]'], {}), '([(1, 5), (2, 5), (3, 5), (4, 5)])\n', (1506, 1540), True, 'import numpy as np\n'), ((1557, 1603), 'numpy.array', 'np.array', (['[(1, 17), (2, 17), (3, 17), (4, 17)]'], {}), '([(1, 17), (2, 17), (3, 17), (4, 17)])\n', (1565, 1603), True, 'import 
numpy as np\n'), ((1614, 1670), 'numpy.concatenate', 'np.concatenate', (['(layer_pairs, barrel_EC_L_pairs)'], {'axis': '(0)'}), '((layer_pairs, barrel_EC_L_pairs), axis=0)\n', (1628, 1670), True, 'import numpy as np\n'), ((1685, 1741), 'numpy.concatenate', 'np.concatenate', (['(layer_pairs, barrel_EC_R_pairs)'], {'axis': '(0)'}), '((layer_pairs, barrel_EC_R_pairs), axis=0)\n', (1699, 1741), True, 'import numpy as np\n'), ((1814, 1851), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""prepare.py"""'], {}), "('prepare.py')\n", (1837, 1851), False, 'import argparse\n'), ((2619, 2635), 'numpy.arctan2', 'np.arctan2', (['r', 'z'], {}), '(r, z)\n', (2629, 2635), True, 'import numpy as np\n'), ((3552, 3582), 'numpy.sqrt', 'np.sqrt', (['(deta ** 2 + dphi ** 2)'], {}), '(deta ** 2 + dphi ** 2)\n', (3559, 3582), True, 'import numpy as np\n'), ((4398, 4409), 'time.time', 'time.time', ([], {}), '()\n', (4407, 4409), False, 'import time\n'), ((5571, 5590), 'pandas.concat', 'pd.concat', (['segments'], {}), '(segments)\n', (5580, 5590), True, 'import pandas as pd\n'), ((6002, 6107), 'numpy.stack', 'np.stack', (['(seg_dr / feature_scale[0], seg_dphi / feature_scale[1], seg_dz /\n feature_scale[2], seg_dR)'], {}), '((seg_dr / feature_scale[0], seg_dphi / feature_scale[1], seg_dz /\n feature_scale[2], seg_dR))\n', (6010, 6107), True, 'import numpy as np\n'), ((6213, 6248), 'numpy.zeros', 'np.zeros', (['n_edges'], {'dtype': 'np.float32'}), '(n_edges, dtype=np.float32)\n', (6221, 6248), True, 'import numpy as np\n'), ((6719, 6749), 'numpy.stack', 'np.stack', (['(seg_start, seg_end)'], {}), '((seg_start, seg_end))\n', (6727, 6749), True, 'import numpy as np\n'), ((8127, 8144), 'pandas.read_hdf', 'pd.read_hdf', (['file'], {}), '(file)\n', (8138, 8144), True, 'import pandas as pd\n'), ((8363, 8389), 'numpy.arctan2', 'np.arctan2', (['data.y', 'data.x'], {}), '(data.y, data.x)\n', (8373, 8389), True, 'import numpy as np\n'), ((8562, 8606), 'logging.info', 'logging.info', 
(["('Event %i, loading data' % evt)"], {}), "('Event %i, loading data' % evt)\n", (8574, 8606), False, 'import logging\n'), ((8638, 8684), 'logging.info', 'logging.info', (["('Event %i, selecting hits' % evt)"], {}), "('Event %i, selecting hits' % evt)\n", (8650, 8684), False, 'import logging\n'), ((8902, 8935), 'numpy.array', 'np.array', (['[1000.0, np.pi, 1000.0]'], {}), '([1000.0, np.pi, 1000.0])\n', (8910, 8935), True, 'import numpy as np\n'), ((8944, 8995), 'logging.info', 'logging.info', (["('Event %i, constructing graphs' % evt)"], {}), "('Event %i, constructing graphs' % evt)\n", (8956, 8995), False, 'import logging\n'), ((9472, 9517), 'logging.info', 'logging.info', (['"""Event %i, writing graphs"""', 'evt'], {}), "('Event %i, writing graphs', evt)\n", (9484, 9517), False, 'import logging\n'), ((9992, 10047), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'log_level', 'format': 'log_format'}), '(level=log_level, format=log_format)\n', (10011, 10047), False, 'import logging\n'), ((10052, 10080), 'logging.info', 'logging.info', (['"""Initializing"""'], {}), "('Initializing')\n", (10064, 10080), False, 'import logging\n'), ((10566, 10606), 'os.path.expandvars', 'os.path.expandvars', (["config['output_dir']"], {}), "(config['output_dir'])\n", (10584, 10606), False, 'import os\n'), ((10611, 10649), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (10622, 10649), False, 'import os\n'), ((10654, 10702), 'logging.info', 'logging.info', (["('Writing outputs to ' + output_dir)"], {}), "('Writing outputs to ' + output_dir)\n", (10666, 10702), False, 'import logging\n'), ((10758, 10769), 'time.time', 'time.time', ([], {}), '()\n', (10767, 10769), False, 'import time\n'), ((11023, 11034), 'time.time', 'time.time', ([], {}), '()\n', (11032, 11034), False, 'import time\n'), ((12081, 12106), 'logging.info', 'logging.info', (['"""All done!"""'], {}), "('All done!')\n", (12093, 12106), False, 'import 
logging\n'), ((5614, 5631), 'pandas.concat', 'pd.concat', (['seg_dr'], {}), '(seg_dr)\n', (5623, 5631), True, 'import pandas as pd\n'), ((5633, 5652), 'pandas.concat', 'pd.concat', (['seg_dphi'], {}), '(seg_dphi)\n', (5642, 5652), True, 'import pandas as pd\n'), ((5674, 5691), 'pandas.concat', 'pd.concat', (['seg_dz'], {}), '(seg_dz)\n', (5683, 5691), True, 'import pandas as pd\n'), ((5693, 5710), 'pandas.concat', 'pd.concat', (['seg_dR'], {}), '(seg_dR)\n', (5702, 5710), True, 'import pandas as pd\n'), ((6427, 6444), 'numpy.arange', 'np.arange', (['n_hits'], {}), '(n_hits)\n', (6436, 6444), True, 'import numpy as np\n'), ((9364, 9410), 'os.path.join', 'os.path.join', (['output_dir', "('event%s_g000' % evt)"], {}), "(output_dir, 'event%s_g000' % evt)\n", (9376, 9410), False, 'import os\n'), ((10114, 10160), 'logging.info', 'logging.info', (["('Command line config: %s' % args)"], {}), "('Command line config: %s' % args)\n", (10126, 10160), False, 'import logging\n'), ((10237, 10266), 'yaml.load', 'yaml.load', (['f', 'yaml.FullLoader'], {}), '(f, yaml.FullLoader)\n', (10246, 10266), False, 'import yaml\n'), ((10298, 10340), 'logging.info', 'logging.info', (["('Configuration: %s' % config)"], {}), "('Configuration: %s' % config)\n", (10310, 10340), False, 'import logging\n'), ((10462, 10491), 'os.path.join', 'os.path.join', (['input_dir', 'file'], {}), '(input_dir, file)\n', (10474, 10491), False, 'import os\n'), ((10779, 10812), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'args.n_workers'}), '(processes=args.n_workers)\n', (10786, 10812), True, 'import multiprocessing as mp\n'), ((10845, 10945), 'functools.partial', 'partial', (['process_event'], {'output_dir': 'output_dir', 'phi_range': '(-np.pi, np.pi)'}), "(process_event, output_dir=output_dir, phi_range=(-np.pi, np.pi), **\n config['selection'])\n", (10852, 10945), False, 'from functools import partial\n'), ((11657, 11746), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'delimiter': '""","""', 
'lineterminator': '"""\n"""', 'fieldnames': 'column_names'}), "(csvfile, delimiter=',', lineterminator='\\n', fieldnames=\n column_names)\n", (11671, 11746), False, 'import csv\n'), ((11976, 12028), 'logging.info', 'logging.info', (['"""Starting IPython interactive session"""'], {}), "('Starting IPython interactive session')\n", (11988, 12028), False, 'import logging\n'), ((12060, 12075), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (12073, 12075), False, 'import IPython\n'), ((2660, 2679), 'numpy.tan', 'np.tan', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (2666, 2679), True, 'import numpy as np\n'), ((9448, 9463), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (9460, 9463), False, 'import logging\n'), ((10504, 10525), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (10514, 10525), False, 'import os\n'), ((11765, 11792), 'os.path.isfile', 'os.path.isfile', (['timing_file'], {}), '(timing_file)\n', (11779, 11792), False, 'import os\n'), ((4995, 5039), 'logging.info', 'logging.info', (["('skipping empty layer: %s' % e)"], {}), "('skipping empty layer: %s' % e)\n", (5007, 5039), False, 'import logging\n'), ((7022, 7033), 'time.time', 'time.time', ([], {}), '()\n', (7031, 7033), False, 'import time\n')] |
# -*- coding: UTF-8 -*-
import tensorflow.compat.v1 as tf
import os, sys
import numpy as np
from PIL import Image
import cv2 as cv
def image_processing(image, height, width):
    """Decode a JPEG string, normalise it to [-1, 1] and resize it.

    Args:
        image: serialized JPEG image bytes.
        height: target image height.
        width: target image width.

    Returns:
        A float32 tensor of shape [height, width, 3] with values in [-1, 1].
    """
    # Decode the raw JPEG bytes into an RGB tensor.
    decoded = tf.image.decode_jpeg(image, channels=3)
    # Normalise pixel values into the [-1, 1] range.
    normalized = (tf.cast(decoded, dtype=tf.float32) - 127.0) / 128.0
    # resize_bilinear operates on batches, so temporarily add a batch axis,
    # resize to the requested size, then drop the batch axis again.
    batched = tf.expand_dims(normalized, 0)
    resized = tf.image.resize_bilinear(batched, [height, width])
    return tf.squeeze(resized, [0])
def batch_inputs(feature_map, data_files, height=2048, width=2448,
        batch_size=1, is_train=True, num_readers=1, num_preprocess_threads=4):
    """Build a queue-based TFRecord input pipeline and return batched tensors.

    Returns a ``(data, label)`` pair where ``data`` is the flattened image
    batch of shape [batch_size, -1] and ``label`` holds the raw
    'image/format' feature for each example.
    """
    # feature_map: feature spec (proto mapping) used to parse each example.
    # data_files: list of TFRecord file paths.
    # batch_size: number of examples per batch.
    # is_train: the provider behaves differently in train vs. test mode;
    #           in test mode a shuffling/looping queue is not needed.
    # num_readers: number of reader ops feeding the examples queue.
    # num_preprocess_threads: number of threads that parse/preprocess data.
    with tf.name_scope('reader_defination'):
        # Create the filename queue: shuffled for training, sequential for testing.
        if is_train:
            filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16)
        else:
            filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1)
        # Use at least one reader.
        num_readers = 1 if num_readers < 1 else num_readers
        if num_readers > 1:
            # Sizes of the example buffer pool.
            examples_per_shard = 1024
            min_queue_examples = examples_per_shard * 16
            if is_train:
                examples_queue = tf.RandomShuffleQueue(capacity=min_queue_examples + 3 * batch_size,
                    min_after_dequeue=min_queue_examples,
                    dtypes=[tf.string])
            else:
                examples_queue = tf.FIFOQueue(capacity=examples_per_shard + 3 * batch_size,
                    dtypes=[tf.string])
            # With several readers, funnel them into one shared examples queue
            # managed by a QueueRunner.
            enqueue_ops = []
            for _ in range(num_readers):
                reader = tf.TFRecordReader()
                _, value = reader.read(filename_queue)
                enqueue_ops.append(examples_queue.enqueue([value]))
            tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
            example_serialized = examples_queue.dequeue()
        else:
            reader = tf.TFRecordReader()
            _, example_serialized = reader.read(filename_queue)
        samples = []
        for _ in range(num_preprocess_threads):
            features = tf.parse_single_example(example_serialized, feature_map)
            samples.append([image_processing(features['image/encoded'], height, width), features['image/format']])
        batch_data = tf.train.batch_join(samples, batch_size=batch_size,
            capacity=2 * num_preprocess_threads * batch_size)
        data = tf.reshape(batch_data[0], [batch_size, -1])
        label = tf.reshape(batch_data[1], [batch_size])
        return (data, label)
# Script entry point: read TFRecords from the directory given as argv[1],
# run the input pipeline for 100 batches and dump each sample as a JPEG.
if __name__ == '__main__':
    data_files = [os.path.join(sys.argv[1], f) for f in os.listdir(sys.argv[1])]
    # Feature spec for object-detection-style TFRecords.
    IMAGE_FEATURE_MAP_SUB = {
        'image/width': tf.io.FixedLenFeature([], tf.int64),
        'image/height': tf.io.FixedLenFeature([], tf.int64),
        'image/filename': tf.io.FixedLenFeature([], tf.string),
        'image/source_id': tf.io.FixedLenFeature([], tf.string),
        'image/key/sha256': tf.io.FixedLenFeature([], tf.string),
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/format': tf.io.FixedLenFeature([], tf.string),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
        'image/object/class/text': tf.io.VarLenFeature(tf.string),
        'image/object/class/label': tf.io.VarLenFeature(tf.int64),
    }
    # NOTE(review): feature_map is built but IMAGE_FEATURE_MAP_SUB is what is
    # actually passed to batch_inputs below — confirm which spec is intended.
    feature_map = {'label':tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
            'data':tf.FixedLenFeature([], dtype=tf.string)}
    with tf.Graph().as_default(), \
        tf.Session(config=tf.ConfigProto(allow_soft_placement = True)) as session:
        data, labels = batch_inputs(IMAGE_FEATURE_MAP_SUB, data_files, batch_size=1, is_train=True)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=session, coord=coord)
        # NOTE(review): this second start_queue_runners call launches threads
        # that are never joined by the coordinator — likely redundant.
        tf.train.start_queue_runners(sess=session)
        for i in range(100):
            data_numpy, labels_numpy = session.run([data, labels])
            for d, l in zip(data_numpy, labels_numpy):
                # Save each sample back out as an image file.
                d_pixel_vector = (d * 128 + 127).astype(np.uint8)
                d_pixel_2d = np.reshape(d_pixel_vector, [28, 28, -1])
                Image.fromarray(d_pixel_2d).convert('L').save('%d.jpg' %l)
        coord.request_stop()
        coord.join(threads)
"tensorflow.compat.v1.train.batch_join",
"tensorflow.compat.v1.train.string_input_producer",
"os.path.join",
"tensorflow.compat.v1.image.resize_bilinear",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.FIFOQueue",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.expand_dims",
"numpy.r... | [((201, 240), 'tensorflow.compat.v1.image.decode_jpeg', 'tf.image.decode_jpeg', (['image'], {'channels': '(3)'}), '(image, channels=3)\n', (221, 240), True, 'import tensorflow.compat.v1 as tf\n'), ((376, 400), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (390, 400), True, 'import tensorflow.compat.v1 as tf\n'), ((432, 480), 'tensorflow.compat.v1.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', '[height, width]'], {}), '(image, [height, width])\n', (456, 480), True, 'import tensorflow.compat.v1 as tf\n'), ((493, 515), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['image', '[0]'], {}), '(image, [0])\n', (503, 515), True, 'import tensorflow.compat.v1 as tf\n'), ((953, 987), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""reader_defination"""'], {}), "('reader_defination')\n", (966, 987), True, 'import tensorflow.compat.v1 as tf\n'), ((2887, 2992), 'tensorflow.compat.v1.train.batch_join', 'tf.train.batch_join', (['samples'], {'batch_size': 'batch_size', 'capacity': '(2 * num_preprocess_threads * batch_size)'}), '(samples, batch_size=batch_size, capacity=2 *\n num_preprocess_threads * batch_size)\n', (2906, 2992), True, 'import tensorflow.compat.v1 as tf\n'), ((3062, 3105), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['batch_data[0]', '[batch_size, -1]'], {}), '(batch_data[0], [batch_size, -1])\n', (3072, 3105), True, 'import tensorflow.compat.v1 as tf\n'), ((3122, 3161), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['batch_data[1]', '[batch_size]'], {}), '(batch_data[1], [batch_size])\n', (3132, 3161), True, 'import tensorflow.compat.v1 as tf\n'), ((3250, 3278), 'os.path.join', 'os.path.join', (['sys.argv[1]', 'f'], {}), '(sys.argv[1], f)\n', (3262, 3278), False, 'import os, sys\n'), ((3366, 3401), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (3387, 3401), True, 'import tensorflow.compat.v1 as tf\n'), ((3427, 
3462), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (3448, 3462), True, 'import tensorflow.compat.v1 as tf\n'), ((3490, 3526), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3511, 3526), True, 'import tensorflow.compat.v1 as tf\n'), ((3555, 3591), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3576, 3591), True, 'import tensorflow.compat.v1 as tf\n'), ((3621, 3657), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3642, 3657), True, 'import tensorflow.compat.v1 as tf\n'), ((3684, 3720), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3705, 3720), True, 'import tensorflow.compat.v1 as tf\n'), ((3746, 3782), 'tensorflow.compat.v1.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3767, 3782), True, 'import tensorflow.compat.v1 as tf\n'), ((3818, 3849), 'tensorflow.compat.v1.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3837, 3849), True, 'import tensorflow.compat.v1 as tf\n'), ((3885, 3916), 'tensorflow.compat.v1.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3904, 3916), True, 'import tensorflow.compat.v1 as tf\n'), ((3952, 3983), 'tensorflow.compat.v1.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3971, 3983), True, 'import tensorflow.compat.v1 as tf\n'), ((4019, 4050), 'tensorflow.compat.v1.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (4038, 4050), True, 'import tensorflow.compat.v1 as tf\n'), ((4087, 4117), 'tensorflow.compat.v1.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.string'], {}), '(tf.string)\n', (4106, 4117), True, 'import 
tensorflow.compat.v1 as tf\n'), ((4155, 4184), 'tensorflow.compat.v1.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (4174, 4184), True, 'import tensorflow.compat.v1 as tf\n'), ((4220, 4276), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.int64', 'default_value': '(-1)'}), '([], dtype=tf.int64, default_value=-1)\n', (4238, 4276), True, 'import tensorflow.compat.v1 as tf\n'), ((4305, 4344), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), '([], dtype=tf.string)\n', (4323, 4344), True, 'import tensorflow.compat.v1 as tf\n'), ((4589, 4611), 'tensorflow.compat.v1.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (4609, 4611), True, 'import tensorflow.compat.v1 as tf\n'), ((4630, 4685), 'tensorflow.compat.v1.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'session', 'coord': 'coord'}), '(sess=session, coord=coord)\n', (4658, 4685), True, 'import tensorflow.compat.v1 as tf\n'), ((4694, 4736), 'tensorflow.compat.v1.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'session'}), '(sess=session)\n', (4722, 4736), True, 'import tensorflow.compat.v1 as tf\n'), ((278, 310), 'tensorflow.compat.v1.cast', 'tf.cast', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (285, 310), True, 'import tensorflow.compat.v1 as tf\n'), ((1091, 1160), 'tensorflow.compat.v1.train.string_input_producer', 'tf.train.string_input_producer', (['data_files'], {'shuffle': '(True)', 'capacity': '(16)'}), '(data_files, shuffle=True, capacity=16)\n', (1121, 1160), True, 'import tensorflow.compat.v1 as tf\n'), ((1204, 1273), 'tensorflow.compat.v1.train.string_input_producer', 'tf.train.string_input_producer', (['data_files'], {'shuffle': '(False)', 'capacity': '(1)'}), '(data_files, shuffle=False, capacity=1)\n', (1234, 1273), True, 'import tensorflow.compat.v1 as tf\n'), ((2496, 2515), 
'tensorflow.compat.v1.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (2513, 2515), True, 'import tensorflow.compat.v1 as tf\n'), ((2681, 2737), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['example_serialized', 'feature_map'], {}), '(example_serialized, feature_map)\n', (2704, 2737), True, 'import tensorflow.compat.v1 as tf\n'), ((3288, 3311), 'os.listdir', 'os.listdir', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3298, 3311), False, 'import os, sys\n'), ((1573, 1702), 'tensorflow.compat.v1.RandomShuffleQueue', 'tf.RandomShuffleQueue', ([], {'capacity': '(min_queue_examples + 3 * batch_size)', 'min_after_dequeue': 'min_queue_examples', 'dtypes': '[tf.string]'}), '(capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples, dtypes=[tf.string])\n', (1594, 1702), True, 'import tensorflow.compat.v1 as tf\n'), ((1860, 1938), 'tensorflow.compat.v1.FIFOQueue', 'tf.FIFOQueue', ([], {'capacity': '(examples_per_shard + 3 * batch_size)', 'dtypes': '[tf.string]'}), '(capacity=examples_per_shard + 3 * batch_size, dtypes=[tf.string])\n', (1872, 1938), True, 'import tensorflow.compat.v1 as tf\n'), ((2132, 2151), 'tensorflow.compat.v1.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (2149, 2151), True, 'import tensorflow.compat.v1 as tf\n'), ((2339, 2401), 'tensorflow.compat.v1.train.queue_runner.QueueRunner', 'tf.train.queue_runner.QueueRunner', (['examples_queue', 'enqueue_ops'], {}), '(examples_queue, enqueue_ops)\n', (2372, 2401), True, 'import tensorflow.compat.v1 as tf\n'), ((4355, 4365), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (4363, 4365), True, 'import tensorflow.compat.v1 as tf\n'), ((4408, 4449), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (4422, 4449), True, 'import tensorflow.compat.v1 as tf\n'), ((5011, 5051), 'numpy.reshape', 'np.reshape', (['d_pixel_vector', '[28, 28, -1]'], {}), 
'(d_pixel_vector, [28, 28, -1])\n', (5021, 5051), True, 'import numpy as np\n'), ((5068, 5095), 'PIL.Image.fromarray', 'Image.fromarray', (['d_pixel_2d'], {}), '(d_pixel_2d)\n', (5083, 5095), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""This module defines various layout objects one can add and manipulate in a template.
"""
from typing import TYPE_CHECKING, Union, List, Tuple, Optional, Dict, Any, Iterator, Iterable, \
Generator
import abc
import numpy as np
from copy import deepcopy
from .util import transform_table, BBox, BBoxArray, transform_point, get_inverse_transform
from .routing.base import Port, WireArray
import bag.io
if TYPE_CHECKING:
from .template import TemplateBase
from .routing.grid import RoutingGrid
ldim = Union[float, int]
loc_type = Tuple[ldim, ldim]
class Figure(object, metaclass=abc.ABCMeta):
    """Abstract base class of every layout object.

    Parameters
    ----------
    resolution : float
        layout unit resolution.
    """

    def __init__(self, resolution):
        # type: (float) -> None
        self._res = resolution
        self._destroyed = False

    @abc.abstractmethod
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        pass

    @abc.abstractmethod
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this figure by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        pass

    @property
    def resolution(self):
        # type: () -> float
        """Returns the layout unit resolution."""
        return self._res

    @property
    def destroyed(self):
        # type: () -> bool
        """Returns True if this instance is destroyed."""
        return self._destroyed

    @property
    def valid(self):
        # type: () -> bool
        """Returns True if this figure has not been destroyed."""
        return not self.destroyed

    def check_destroyed(self):
        # type: () -> None
        """Raises an exception if this object is already destroyed."""
        if self._destroyed:
            raise Exception('This %s is already destroyed.' % self.__class__.__name__)

    def destroy(self):
        # type: () -> None
        """Destroy this instance."""
        self._destroyed = True
# noinspection PyAbstractClass
# noinspection PyAbstractClass
class Arrayable(Figure, metaclass=abc.ABCMeta):
    """A layout object that can be replicated on a regular grid.

    Also handles destroy support.

    Parameters
    ----------
    res : float
        layout unit resolution.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : Union[float or int]
        column pitch.
    spy : Union[float or int]
        row pitch.
    unit_mode : bool
        True if spx/spy are specified in resolution units.
    """

    def __init__(self, res, nx=1, ny=1, spx=0, spy=0, unit_mode=False):
        # type: (float, int, int, ldim, ldim, bool) -> None
        Figure.__init__(self, res)
        self._nx = nx
        self._ny = ny
        # Pitches are always stored internally in resolution units.
        if not unit_mode:
            spx = int(round(spx / res))
            spy = int(round(spy / res))
        self._spx_unit = spx
        self._spy_unit = spy

    @property
    def nx(self):
        # type: () -> int
        """Number of columns."""
        return self._nx

    @nx.setter
    def nx(self, val):
        # type: (int) -> None
        """Sets the number of columns."""
        self.check_destroyed()
        if val <= 0:
            raise ValueError('Cannot have non-positive number of columns.')
        self._nx = val

    @property
    def ny(self):
        # type: () -> int
        """Number of rows."""
        return self._ny

    @ny.setter
    def ny(self, val):
        # type: (int) -> None
        """Sets the number of rows."""
        self.check_destroyed()
        if val <= 0:
            raise ValueError('Cannot have non-positive number of rows.')
        self._ny = val

    @property
    def spx(self):
        # type: () -> float
        """The column pitch."""
        return self.resolution * self._spx_unit

    @spx.setter
    def spx(self, val):
        # type: (float) -> None
        """Sets the new column pitch."""
        self.check_destroyed()
        if val < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spx_unit = int(round(val / self.resolution))

    @property
    def spx_unit(self):
        # type: () -> int
        """The column pitch in resolution units."""
        return self._spx_unit

    @spx_unit.setter
    def spx_unit(self, val):
        # type: (int) -> None
        """Sets the new column pitch in resolution units."""
        self.check_destroyed()
        if val < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spx_unit = val

    @property
    def spy(self):
        # type: () -> float
        """The row pitch."""
        return self.resolution * self._spy_unit

    @spy.setter
    def spy(self, val):
        # type: (float) -> None
        """Sets the new row pitch."""
        self.check_destroyed()
        if val < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spy_unit = int(round(val / self.resolution))

    @property
    def spy_unit(self):
        # type: () -> int
        """The row pitch in resolution units."""
        return self._spy_unit

    @spy_unit.setter
    def spy_unit(self, val):
        # type: (int) -> None
        """Sets the new row pitch in resolution units."""
        self.check_destroyed()
        if val < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spy_unit = val

    @Figure.valid.getter
    def valid(self):
        # type: () -> bool
        """Returns True if this instance is valid, i.e. not destroyed and nx, ny >= 1."""
        return (not self.destroyed) and self._nx >= 1 and self._ny >= 1

    def get_item_location(self, row=0, col=0, unit_mode=False):
        # type: (int, int, bool) -> Tuple[ldim, ldim]
        """Returns the location of the given item in the array.

        Parameters
        ----------
        row : int
            the item row index.  0 is the bottom-most row.
        col : int
            the item column index.  0 is the left-most column.
        unit_mode : bool
            True to return coordinates in resolution units.

        Returns
        -------
        xo : Union[float, int]
            the item X coordinate.
        yo : Union[float, int]
            the item Y coordinate.
        """
        if not (0 <= row < self._ny and 0 <= col < self._nx):
            raise ValueError('Invalid row/col index: row=%d, col=%d' % (row, col))
        xo = col * self._spx_unit
        yo = row * self._spy_unit
        if unit_mode:
            return xo, yo
        res = self.resolution
        return xo * res, yo * res
class InstanceInfo(dict):
    """A dictionary that represents a layout instance.
    """
    # keys every InstanceInfo must carry, in canonical order;
    # __init__ raises KeyError if any of them is missing from kwargs.
    param_list = ['lib', 'cell', 'view', 'name', 'loc', 'orient', 'num_rows',
                  'num_cols', 'sp_rows', 'sp_cols', 'master_key']
    def __init__(self, res, change_orient=True, **kwargs):
        kv_iter = ((key, kwargs[key]) for key in self.param_list)
        dict.__init__(self, kv_iter)
        self._resolution = res
        if 'params' in kwargs:
            self.params = kwargs['params']
        # skill/OA array before rotation, while we're doing the opposite.
        # this is supposed to fix it: the array spacing/count are rewritten per
        # orientation so the exported (rotate-first) instance matches our intent.
        if change_orient:
            orient = self['orient']
            if orient == 'R180':
                self['sp_rows'] *= -1
                self['sp_cols'] *= -1
            elif orient == 'MX':
                self['sp_rows'] *= -1
            elif orient == 'MY':
                self['sp_cols'] *= -1
            elif orient == 'R90':
                self['sp_rows'], self['sp_cols'] = self['sp_cols'], -self['sp_rows']
                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']
            elif orient == 'MXR90':
                self['sp_rows'], self['sp_cols'] = self['sp_cols'], self['sp_rows']
                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']
            elif orient == 'MYR90':
                self['sp_rows'], self['sp_cols'] = -self['sp_cols'], -self['sp_rows']
                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']
            elif orient == 'R270':
                self['sp_rows'], self['sp_cols'] = -self['sp_cols'], self['sp_rows']
                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']
            elif orient != 'R0':
                raise ValueError('Unknown orientation: %s' % orient)
    @property
    def lib(self):
        # type: () -> str
        """The master library name."""
        return self['lib']
    @property
    def cell(self):
        # type: () -> str
        """The master cell name."""
        return self['cell']
    @property
    def view(self):
        # type: () -> str
        """The master view name."""
        return self['view']
    @property
    def name(self):
        # type: () -> str
        """The instance name."""
        return self['name']
    @name.setter
    def name(self, new_name):
        # type: (str) -> None
        """Sets the instance name."""
        self['name'] = new_name
    @property
    def loc(self):
        # type: () -> Tuple[float, float]
        """The instance location as an (x, y) tuple."""
        loc_list = self['loc']
        return loc_list[0], loc_list[1]
    @property
    def orient(self):
        # type: () -> str
        """The instance orientation string."""
        return self['orient']
    @property
    def num_rows(self):
        # type: () -> int
        """Number of rows in the instance array."""
        return self['num_rows']
    @property
    def num_cols(self):
        # type: () -> int
        """Number of columns in the instance array."""
        return self['num_cols']
    @property
    def sp_rows(self):
        # type: () -> float
        """The row pitch of the instance array."""
        return self['sp_rows']
    @property
    def sp_cols(self):
        # type: () -> float
        """The column pitch of the instance array."""
        return self['sp_cols']
    @property
    def params(self):
        # type: () -> Optional[Dict[str, Any]]
        """The master parameters dictionary, or None if not set."""
        return self.get('params', None)
    @params.setter
    def params(self, new_params):
        # type: (Optional[Dict[str, Any]]) -> None
        """Sets the master parameters dictionary."""
        self['params'] = new_params
    @property
    def master_key(self):
        """The key identifying the instance master, or None if not set."""
        return self.get('master_key', None)
    @master_key.setter
    def master_key(self, value):
        """Sets the instance master key."""
        self['master_key'] = value
    @property
    def angle_reflect(self):
        # type: () -> Tuple[int, bool]
        """The orientation expressed as a (rotation angle, reflect) tuple."""
        orient = self['orient']
        if orient == 'R0':
            return 0, False
        elif orient == 'R180':
            return 180, False
        elif orient == 'MX':
            return 0, True
        elif orient == 'MY':
            return 180, True
        elif orient == 'R90':
            return 90, False
        elif orient == 'MXR90':
            return 90, True
        elif orient == 'MYR90':
            return 270, True
        elif orient == 'R270':
            return 270, False
        else:
            raise ValueError('Unknown orientation: %s' % orient)
    def copy(self):
        """Override copy method of dictionary to return an InstanceInfo instead."""
        # change_orient=False: spacings were already adjusted when self was built
        return InstanceInfo(self._resolution, change_orient=False, **self)
    def move_by(self, dx=0, dy=0):
        # type: (float, float) -> None
        """Move this instance by the given amount.
        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        """
        res = self._resolution
        loc = self.loc
        # snap the new location to the resolution grid
        self['loc'] = [round((loc[0] + dx) / res) * res,
                       round((loc[1] + dy) / res) * res]
class Instance(Arrayable):
    """A layout instance, with optional arraying parameters.
    Parameters
    ----------
    parent_grid : RoutingGrid
        the parent RoutingGrid object.
    lib_name : str
        the layout library name.
    master : TemplateBase
        the master template of this instance.
    loc : Tuple[Union[float, int], Union[float, int]]
        the origin of this instance.
    orient : str
        the orientation of this instance.
    name : Optional[str]
        name of this instance.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : Union[float, int]
        column pitch.
    spy : Union[float, int]
        row pitch.
    unit_mode : bool
        True if layout dimensions are specified in resolution units.
    """
    def __init__(self,
                 parent_grid,  # type: RoutingGrid
                 lib_name,  # type: str
                 master,  # type: TemplateBase
                 loc,  # type: Tuple[ldim, ldim]
                 orient,  # type: str
                 name=None,  # type: Optional[str]
                 nx=1,  # type: int
                 ny=1,  # type: int
                 spx=0,  # type: ldim
                 spy=0,  # type: ldim
                 unit_mode=False,  # type: bool
                 ):
        # type: (...) -> None
        res = parent_grid.resolution
        Arrayable.__init__(self, res, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=unit_mode)
        self._parent_grid = parent_grid
        self._lib_name = lib_name
        self._inst_name = name
        self._master = master
        # origin is always stored in resolution units
        if unit_mode:
            self._loc_unit = loc[0], loc[1]
        else:
            self._loc_unit = int(round(loc[0] / res)), int(round(loc[1] / res))
        self._orient = orient
    def new_master_with(self, **kwargs):
        # type: (**Any) -> None
        """Change the master template of this instance.
        This method will get the old master template layout parameters, update
        the parameter values with the given dictionary, then create a new master
        template with those parameters and associate it with this instance.
        Parameters
        ----------
        **kwargs
            a dictionary of new parameter values.
        """
        self._master = self._master.new_template_with(**kwargs)
    def blockage_iter(self, layer_id, test_box, spx=0, spy=0):
        # type: (int, BBox, int, int) -> Generator[BBox, None, None]
        """Iterates over master blockages on the given layer that overlap test_box,
        transformed into this instance's coordinate system.  test_box is first
        expanded by (spx, spy)."""
        # transform the given BBox to master coordinate
        if self.destroyed:
            return
        base_box = self._master.get_track_bbox(layer_id)
        if not base_box.is_physical():
            return
        base_box = self.translate_master_box(base_box)
        test = test_box.expand(dx=spx, dy=spy, unit_mode=True)
        # clamp pitch to 1 to avoid division by zero for non-arrayed instances
        inst_spx = max(self.spx_unit, 1)
        inst_spy = max(self.spy_unit, 1)
        xl = base_box.left_unit
        yb = base_box.bottom_unit
        xr = base_box.right_unit
        yt = base_box.top_unit
        # range of array indices whose copy of base_box can overlap test;
        # -(-a // b) is ceiling division
        nx0 = max(0, -(-(test.left_unit - xr) // inst_spx))
        nx1 = min(self.nx - 1, (test.right_unit - xl) // inst_spx)
        ny0 = max(0, -(-(test.bottom_unit - yt) // inst_spy))
        ny1 = min(self.ny - 1, (test.top_unit - yb) // inst_spy)
        orient = self._orient
        x0, y0 = self._loc_unit
        # 90-degree orientations swap X/Y, so swap the expansion passed to the master
        if (orient == 'R90' or orient == 'R270' or
                orient == 'MXR90' or orient == 'MYR90'):
            spx, spy = spy, spx
        for row in range(ny0, ny1 + 1):
            for col in range(nx0, nx1 + 1):
                dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
                loc = dx + x0, dy + y0
                # map test_box into master coordinates, query, then map results back
                inv_loc, inv_orient = get_inverse_transform(loc, orient)
                cur_box = test_box.transform(inv_loc, inv_orient, unit_mode=True)
                for box in self._master.blockage_iter(layer_id, cur_box, spx=spx, spy=spy):
                    yield box.transform(loc, orient, unit_mode=True)
    def all_rect_iter(self):
        # type: () -> Generator[Tuple[int, BBox, int, int], None, None]
        """Iterates over all master rectangles transformed to instance coordinates,
        as (layer_id, box, sdx, sdy) tuples, for every array element."""
        if self.destroyed:
            return
        orient = self._orient
        x0, y0 = self._loc_unit
        # 90-degree orientations swap the X/Y components of (sdx, sdy)
        flip = (orient == 'R90' or orient == 'R270' or orient == 'MXR90' or orient == 'MYR90')
        for layer_id, box, sdx, sdy in self._master.all_rect_iter():
            if flip:
                sdx, sdy = sdy, sdx
            for row in range(self.ny):
                for col in range(self.nx):
                    dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
                    loc = dx + x0, dy + y0
                    yield layer_id, box.transform(loc, orient, unit_mode=True), sdx, sdy
    def intersection_rect_iter(self, layer_id, test_box):
        # type: (int, BBox) -> Generator[BBox, None, None]
        """Iterates over master rectangles on the given layer that intersect
        test_box, transformed into this instance's coordinate system."""
        if self.destroyed:
            return
        base_box = self._master.get_track_bbox(layer_id)
        if not base_box.is_physical():
            return
        base_box = self.translate_master_box(base_box)
        # clamp pitch to 1 to avoid division by zero for non-arrayed instances
        inst_spx = max(self.spx_unit, 1)
        inst_spy = max(self.spy_unit, 1)
        xl = base_box.left_unit
        yb = base_box.bottom_unit
        xr = base_box.right_unit
        yt = base_box.top_unit
        # range of array indices whose copy of base_box can overlap test_box
        nx0 = max(0, -(-(test_box.left_unit - xr) // inst_spx))
        nx1 = min(self.nx - 1, (test_box.right_unit - xl) // inst_spx)
        ny0 = max(0, -(-(test_box.bottom_unit - yt) // inst_spy))
        ny1 = min(self.ny - 1, (test_box.top_unit - yb) // inst_spy)
        orient = self._orient
        x0, y0 = self._loc_unit
        for row in range(ny0, ny1 + 1):
            for col in range(nx0, nx1 + 1):
                dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
                loc = dx + x0, dy + y0
                # map test_box into master coordinates, query, then map results back
                inv_loc, inv_orient = get_inverse_transform(loc, orient)
                cur_box = test_box.transform(inv_loc, inv_orient, unit_mode=True)
                for box in self._master.intersection_rect_iter(layer_id, cur_box):
                    yield box.transform(loc, orient, unit_mode=True)
    def get_rect_bbox(self, layer):
        """Returns the overall bounding box of all rectangles on the given layer.
        Note: currently this does not check primitive instances or vias.
        """
        bbox = self._master.get_rect_bbox(layer)
        if not bbox.is_valid():
            return bbox
        box_arr = BBoxArray(self.translate_master_box(bbox), nx=self.nx, ny=self.ny,
                            spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox()
    def track_bbox_iter(self):
        """Iterates over (layer_id, overall track bounding box) pairs for this instance."""
        for layer_id, bbox in self._master.track_bbox_iter():
            box_arr = BBoxArray(self.translate_master_box(bbox), nx=self.nx, ny=self.ny,
                                spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)
            yield layer_id, box_arr.get_overall_bbox()
    @property
    def master(self):
        # type: () -> TemplateBase
        """The master template of this instance."""
        return self._master
    @property
    def location(self):
        # type: () -> Tuple[float, float]
        """The instance location."""
        return self._loc_unit[0] * self.resolution, self._loc_unit[1] * self.resolution
    @location.setter
    def location(self, new_loc):
        # type: (Tuple[float, float]) -> None
        """Sets the instance location."""
        self.check_destroyed()
        self._loc_unit = (int(round(new_loc[0] / self.resolution)),
                          int(round(new_loc[1] / self.resolution)))
    @property
    def location_unit(self):
        # type: () -> Tuple[int, int]
        """The instance location, in resolution units."""
        return self._loc_unit
    @location_unit.setter
    def location_unit(self, new_loc):
        # type: (Tuple[int, int]) -> None
        """Sets the instance location, in resolution units."""
        self.check_destroyed()
        self._loc_unit = (new_loc[0], new_loc[1])
    @property
    def orientation(self):
        # type: () -> str
        """The instance orientation"""
        return self._orient
    @orientation.setter
    def orientation(self, val):
        # type: (str) -> None
        """Sets the instance orientation."""
        self.check_destroyed()
        if val not in transform_table:
            raise ValueError('Unsupported orientation: %s' % val)
        self._orient = val
    @property
    def content(self):
        # type: () -> InstanceInfo
        """A dictionary representation of this instance."""
        return InstanceInfo(self.resolution,
                            lib=self._lib_name,
                            cell=self.master.cell_name,
                            view='layout',
                            name=self._inst_name,
                            loc=list(self.location),
                            orient=self.orientation,
                            num_rows=self.ny,
                            num_cols=self.nx,
                            sp_rows=self.spy,
                            sp_cols=self.spx,
                            master_key=self.master.key
                            )
    @property
    def bound_box(self):
        # type: () -> BBox
        """Returns the overall bounding box of this instance."""
        box_arr = BBoxArray(self._master.bound_box, nx=self.nx, ny=self.ny,
                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,
                                                    unit_mode=True)
    @property
    def array_box(self):
        # type: () -> BBox
        """Returns the array box of this instance."""
        master_box = getattr(self._master, 'array_box', None)  # type: BBox
        if master_box is None:
            raise ValueError('Master template array box is not defined.')
        box_arr = BBoxArray(master_box, nx=self.nx, ny=self.ny,
                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,
                                                    unit_mode=True)
    @property
    def fill_box(self):
        # type: () -> BBox
        """Returns the fill box of this instance."""
        master_box = getattr(self._master, 'fill_box', None)  # type: BBox
        if master_box is None:
            raise ValueError('Master template fill box is not defined.')
        box_arr = BBoxArray(master_box, nx=self.nx, ny=self.ny,
                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,
                                                    unit_mode=True)
    def get_bound_box_of(self, row=0, col=0):
        """Returns the bounding box of an instance in this mosaic."""
        dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
        xshift, yshift = self._loc_unit
        xshift += dx
        yshift += dy
        return self._master.bound_box.transform((xshift, yshift), self.orientation, unit_mode=True)
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (Union[float, int], Union[float, int], bool) -> None
        """Move this instance by the given amount.
        Parameters
        ----------
        dx : Union[float, int]
            the X shift.
        dy : Union[float, int]
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units
        """
        if not unit_mode:
            dx = int(round(dx / self.resolution))
            dy = int(round(dy / self.resolution))
        self._loc_unit = self._loc_unit[0] + dx, self._loc_unit[1] + dy
    def translate_master_box(self, box):
        # type: (BBox) -> BBox
        """Transform the bounding box in master template.
        Parameters
        ----------
        box : BBox
            the BBox in master template coordinate.
        Returns
        -------
        new_box : BBox
            the cooresponding BBox in instance coordinate.
        """
        return box.transform(self.location_unit, self.orientation, unit_mode=True)
    def translate_master_location(self,
                                  mloc,  # type: Tuple[Union[float, int], Union[float, int]]
                                  unit_mode=False,  # type: bool
                                  ):
        # type: (...) -> Tuple[Union[float, int], Union[float, int]]
        """Returns the actual location of the given point in master template.
        Parameters
        ----------
        mloc : Tuple[Union[float, int], Union[float, int]]
            the location in master coordinate.
        unit_mode : bool
            True if location is given in resolution units.
        Returns
        -------
        xi : Union[float, int]
            the actual X coordinate.  Integer if unit_mode is True.
        yi : Union[float, int]
            the actual Y coordinate.  Integer if unit_mode is True.
        """
        res = self.resolution
        if unit_mode:
            mx, my = mloc[0], mloc[1]
        else:
            mx, my = int(round(mloc[0] / res)), int(round(mloc[1] / res))
        p = transform_point(mx, my, self.location_unit, self.orientation)
        if unit_mode:
            return p[0], p[1]
        return p[0] * res, p[1] * res
    def translate_master_track(self, layer_id, track_idx):
        # type: (int, Union[float, int]) -> Union[float, int]
        """Returns the actual track index of the given track in master template.
        Parameters
        ----------
        layer_id : int
            the layer ID.
        track_idx : Union[float, int]
            the track index.
        Returns
        -------
        new_idx : Union[float, int]
            the new track index.
        """
        dx, dy = self.location_unit
        return self._parent_grid.transform_track(layer_id, track_idx, dx=dx, dy=dy,
                                                 orient=self.orientation, unit_mode=True)
    def get_port(self, name='', row=0, col=0):
        # type: (Optional[str], int, int) -> Port
        """Returns the port object of the given instance in the array.
        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        row : int
            the instance row index.  Index 0 is the bottom-most row.
        col : int
            the instance column index.  Index 0 is the left-most column.
        Returns
        -------
        port : Port
            the port object.
        """
        dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
        xshift, yshift = self._loc_unit
        loc = (xshift + dx, yshift + dy)
        return self._master.get_port(name).transform(self._parent_grid, loc=loc,
                                                     orient=self.orientation, unit_mode=True)
    def get_pin(self, name='', row=0, col=0, layer=-1):
        # type: (Optional[str], int, int, int) -> Union[WireArray, BBox]
        """Returns the first pin with the given name.
        This is an efficient method if you know this instance has exactly one pin.
        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        row : int
            the instance row index.  Index 0 is the bottom-most row.
        col : int
            the instance column index.  Index 0 is the left-most column.
        layer : int
            the pin layer.  If negative, check to see if the given port has only one layer.
            If so then use that layer.
        Returns
        -------
        pin : Union[WireArray, BBox]
            the first pin associated with the port of given name.
        """
        port = self.get_port(name, row, col)
        return port.get_pins(layer)[0]
    def get_all_port_pins(self, name='', layer=-1):
        # type: (Optional[str], int) -> List[WireArray]
        """Returns a list of all pins of all ports with the given name in this instance array.
        This method gathers ports from all instances in this array with the given name,
        then find all pins of those ports on the given layer, then return as list of WireArrays.
        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        layer : int
            the pin layer.  If negative, check to see if the given port has only one layer.
            If so then use that layer.
        Returns
        -------
        pin_list : List[WireArray]
            the list of pins as WireArrays.
        """
        results = []
        for col in range(self.nx):
            for row in range(self.ny):
                port = self.get_port(name, row, col)
                results.extend(port.get_pins(layer))
        return results
    def port_pins_iter(self, name='', layer=-1):
        # type: (Optional[str], int) -> Iterator[WireArray]
        """Iterate through all pins of all ports with the given name in this instance array.
        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        layer : int
            the pin layer.  If negative, check to see if the given port has only one layer.
            If so then use that layer.
        Yields
        ------
        pin : WireArray
            the pin as WireArray.
        """
        for col in range(self.nx):
            for row in range(self.ny):
                try:
                    port = self.get_port(name, row, col)
                except KeyError:
                    # no such port; stop iteration silently
                    return
                for warr in port.get_pins(layer):
                    yield warr
    def port_names_iter(self):
        # type: () -> Iterable[str]
        """Iterates over port names in this instance.
        Yields
        ------
        port_name : str
            name of a port in this instance.
        """
        return self._master.port_names_iter()
    def has_port(self, port_name):
        # type: (str) -> bool
        """Returns True if this instance has the given port."""
        return self._master.has_port(port_name)
    def has_prim_port(self, port_name):
        # type: (str) -> bool
        """Returns True if this instance has the given primitive port."""
        return self._master.has_prim_port(port_name)
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Optional[Figure]
        """Transform this figure."""
        if not unit_mode:
            res = self.resolution
            loc = int(round(loc[0] / res)), int(round(loc[1] / res))
        # copy=False modifies this instance in place; copy=True returns a deep copy
        if not copy:
            ans = self
        else:
            ans = deepcopy(self)
        ans._loc_unit = loc
        ans._orient = orient
        return ans
class Rect(Arrayable):
    """A layout rectangle, with optional arraying parameters.
    Parameters
    ----------
    layer : string or (string, string)
        the layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    bbox : bag.layout.util.BBox or bag.layout.util.BBoxArray
        the base bounding box.  If this is a BBoxArray, the BBoxArray's
        arraying parameters are used.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : float
        column pitch.
    spy : float
        row pitch.
    unit_mode : bool
        True if layout dimensions are specified in resolution units.
    """
    def __init__(self, layer, bbox, nx=1, ny=1, spx=0, spy=0, unit_mode=False):
        # python 2/3 compatibility: convert raw bytes to string.
        layer = bag.io.fix_string(layer)
        if isinstance(layer, str):
            layer = (layer, 'drawing')
        self._layer = layer[0], layer[1]
        if isinstance(bbox, BBoxArray):
            # a BBoxArray supplies both the base box and the array parameters,
            # overriding the nx/ny/spx/spy arguments
            self._bbox = bbox.base
            Arrayable.__init__(self, self._bbox.resolution, nx=bbox.nx, ny=bbox.ny,
                               spx=bbox.spx_unit, spy=bbox.spy_unit, unit_mode=True)
        else:
            self._bbox = bbox
            Arrayable.__init__(self, self._bbox.resolution, nx=nx, ny=ny, spx=spx, spy=spy,
                               unit_mode=unit_mode)
    @property
    def bbox_array(self):
        """The BBoxArray representing this (Arrayed) rectangle.
        Returns
        -------
        barr : :class:`bag.layout.util.BBoxArray`
            the BBoxArray representing this (Arrayed) rectangle.
        """
        return BBoxArray(self._bbox, nx=self.nx, ny=self.ny,
                         spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)
    @property
    def layer(self):
        """The rectangle (layer, purpose) pair."""
        return self._layer
    @layer.setter
    def layer(self, val):
        """Sets the rectangle layer."""
        self.check_destroyed()
        # python 2/3 compatibility: convert raw bytes to string.
        val = bag.io.fix_string(val)
        if isinstance(val, str):
            val = (val, 'drawing')
        self._layer = val[0], val[1]
        # mutating after creation invalidates power-fill bookkeeping; warn loudly
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
    @property
    def bbox(self):
        """The rectangle bounding box."""
        return self._bbox
    @bbox.setter
    def bbox(self, val):
        """Sets the rectangle bounding box."""
        self.check_destroyed()
        if not val.is_physical():
            raise ValueError('Bounding box %s is not physical' % val)
        # mutating after creation invalidates power-fill bookkeeping; warn loudly
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._bbox = val
    @property
    def content(self):
        """A dictionary representation of this rectangle."""
        content = dict(layer=list(self.layer),
                       bbox=[[self.bbox.left, self.bbox.bottom], [self.bbox.right, self.bbox.top]],
                       )
        # arraying parameters are only recorded for true arrays
        if self.nx > 1 or self.ny > 1:
            content['arr_nx'] = self.nx
            content['arr_ny'] = self.ny
            content['arr_spx'] = self.spx
            content['arr_spy'] = self.spy
        return content
    def move_by(self, dx=0, dy=0, unit_mode=False):
        """Move the base rectangle by the given amount.
        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if layout dimensions are specified in resolution units.
        """
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._bbox = self._bbox.move_by(dx=dx, dy=dy, unit_mode=unit_mode)
    def extend(self, x=None, y=None):
        """Extend the base rectangle horizontally or vertically so it overlaps the given X/Y coordinate.
        Parameters
        ----------
        x : float or None
            if not None, make sure the base rectangle overlaps this X coordinate.
        y : float or None
            if not None, make sure the base rectangle overlaps this Y coordinate.
        """
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._bbox = self._bbox.extend(x=x, y=y)
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Optional[Figure]
        """Transform this figure."""
        new_box = self._bbox.transform(loc=loc, orient=orient, unit_mode=unit_mode)
        if not copy:
            # in-place mutation invalidates power-fill bookkeeping; warn loudly
            print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
            ans = self
        else:
            ans = deepcopy(self)
        ans._bbox = new_box
        return ans
    def destroy(self):
        # type: () -> None
        """Destroy this instance."""
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        Arrayable.destroy(self)
class Path(Figure):
    """A layout path.  Only 45/90 degree turns are allowed.
    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    layer : string or (string, string)
        the layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    width : float
        width of this path, in layout units.
    points : List[Tuple[float, float]]
        list of path points.
    end_style : str
        the path ends style.  Currently support 'truncate', 'extend', and 'round'.
    join_style : str
        the ends style at intermediate points of the path.  Currently support 'extend' and 'round'.
    unit_mode : bool
        True if width and points are given as resolution units instead of layout units.
    """
    def __init__(self,
                 resolution,  # type: float
                 layer,  # type: Union[str, Tuple[str, str]]
                 width,  # type: Union[int, float]
                 points,  # type: List[Tuple[Union[int, float], Union[int, float]]]
                 end_style='truncate',  # type: str
                 join_style='extend',  # type: str
                 unit_mode=False,  # type: bool
                 ):
        # type: (...) -> None
        layer = bag.io.fix_string(layer)
        Figure.__init__(self, resolution)
        if isinstance(layer, str):
            layer = (layer, 'drawing')
        self._layer = layer
        self._end_style = end_style
        self._join_style = join_style
        self._destroyed = False
        self._width = 0
        self._points = None
        if not unit_mode:
            # convert to resolution units, rounding to the nearest grid point
            self._width = int(round(width / resolution))
            pt_list = self.compress_points(((int(round(x / resolution)), int(round(y / resolution)))
                                            for x, y in points))
        else:
            self._width = width
            pt_list = self.compress_points(points)
        self._points = np.array(pt_list, dtype=int)
    @classmethod
    def compress_points(cls, pts_unit):
        """Remove duplicate/collinear points, checking all segments are 45 or 90 degrees.

        Parameters
        ----------
        pts_unit : Iterable[Tuple[int, int]]
            the path points, in resolution units.

        Returns
        -------
        pt_list : List[Tuple[int, int]]
            the compressed point list.
        """
        # remove collinear/duplicate points, and make sure all segments are 45 degrees.
        cur_len = 0
        pt_list = []
        for x, y in pts_unit:
            if cur_len == 0:
                pt_list.append((x, y))
                cur_len += 1
            else:
                lastx, lasty = pt_list[-1]
                # make sure we don't have duplicate points
                if x != lastx or y != lasty:
                    dx, dy = x - lastx, y - lasty
                    if dx != 0 and dy != 0 and abs(dx) != abs(dy):
                        # we don't have 45 degree wires
                        raise ValueError('Cannot have line segment (%d, %d)->(%d, %d) in path'
                                         % (lastx, lasty, x, y))
                    if cur_len >= 2:
                        # check for collinearity by comparing slopes with the previous segment
                        dx0, dy0 = lastx - pt_list[-2][0], lasty - pt_list[-2][1]
                        if (dx == 0 and dx0 == 0) or (dx != 0 and dx0 != 0 and
                                                      dy / dx == dy0 / dx0):
                            # collinear, remove middle point
                            del pt_list[-1]
                            cur_len -= 1
                    pt_list.append((x, y))
                    cur_len += 1
        return pt_list
    @property
    def layer(self):
        # type: () -> Tuple[str, str]
        """The rectangle (layer, purpose) pair."""
        return self._layer
    @Figure.valid.getter
    def valid(self):
        # type: () -> bool
        """Returns True if this instance is valid."""
        # a path needs at least two points and a positive width to be drawable
        return not self.destroyed and len(self._points) >= 2 and self._width > 0
    @property
    def width(self):
        """The path width, in layout units."""
        return self._width * self._res
    @property
    def points(self):
        """The path points, in layout units."""
        return [(self._points[idx][0] * self._res, self._points[idx][1] * self._res)
                for idx in range(self._points.shape[0])]
    @property
    def points_unit(self):
        """The path points, in resolution units."""
        return [(self._points[idx][0], self._points[idx][1])
                for idx in range(self._points.shape[0])]
    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this path."""
        content = dict(layer=list(self.layer),
                       width=self._width * self._res,
                       points=self.points,
                       end_style=self._end_style,
                       join_style=self._join_style,
                       )
        return content
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this path by the given amount.
        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        if not unit_mode:
            dx = int(round(dx / self._res))
            dy = int(round(dy / self._res))
        self._points += np.array([dx, dy])
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        res = self.resolution
        if unit_mode:
            dx, dy = loc
        else:
            dx = int(round(loc[0] / res))
            dy = int(round(loc[1] / res))
        dvec = np.array([dx, dy])
        # 2x2 rotation/mirror matrix for the given orientation
        mat = transform_table[orient]
        new_points = np.dot(mat, self._points.T).T + dvec
        if not copy:
            ans = self
        else:
            ans = deepcopy(self)
        ans._points = new_points
        return ans
class PathCollection(Figure):
    """A layout figure made up of one or more Path objects.

    Convenient for drawing bus/transmission-line structures as a single unit.

    Parameters
    ----------
    resolution : float
        layout unit resolution.
    paths : List[Path]
        paths in this collection.
    """
    def __init__(self, resolution, paths):
        Figure.__init__(self, resolution)
        self._paths = paths
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Shift every path in this collection by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        for member in self._paths:
            member.move_by(dx=dx, dy=dy, unit_mode=unit_mode)
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=True):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> PathCollection
        """Transform every path in this collection.

        With copy=True (the default) a deep copy is transformed and returned;
        otherwise this collection is modified in place.
        """
        target = deepcopy(self) if copy else self
        for member in target._paths:
            member.transform(loc=loc, orient=orient, unit_mode=unit_mode, copy=False)
        return target
class TLineBus(PathCollection):
    """A transmission line bus drawn using Path.
    assumes only 45 degree turns are used, and begin and end line segments are straight.
    Parameters
    ----------
    resolution : float
        layout unit resolution.
    layer : Union[str, Tuple[str, str]]
        the bus layer.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        list of center points of the bus.
    widths : List[Union[float, int]]
        list of wire widths.  0 index is left/bottom most wire.
    spaces : List[Union[float, int]]
        list of wire spacings.
    end_style : str
        the path ends style.  Currently support 'truncate', 'extend', and 'round'.
    unit_mode : bool
        True if width and points are given as resolution units instead of layout units.
    """
    def __init__(self, resolution, layer, points, widths, spaces, end_style='truncate',
                 unit_mode=False):
        npoints = len(points)
        if npoints < 2:
            raise ValueError('Must have >= 2 points.')
        if not unit_mode:
            points = ((int(round(px / resolution)), int(round(py / resolution)))
                      for px, py in points)
            # round widths/spaces to even numbers of resolution units so the
            # half-width centerline offsets computed below stay integral
            widths = [int(round(v / resolution / 2.0)) * 2 for v in widths]
            spaces = [int(round(v / resolution / 2.0)) * 2 for v in spaces]
        points = Path.compress_points(points)
        self._points = np.array(points, dtype=int)
        self._layer = layer
        self._widths = widths
        self._spaces = spaces
        self._end_style = end_style
        # perpendicular offset of each wire centerline from the bus center,
        # in resolution units; index 0 is the left/bottom-most wire
        tot_width = sum(self._widths) + sum(self._spaces)
        delta_list = [(-tot_width + self._widths[0]) // 2]
        for w0, w1, sp in zip(self._widths, self._widths[1:], self._spaces):
            delta_list.append(delta_list[-1] + sp + ((w0 + w1) // 2))
        paths = self.create_paths(delta_list, resolution)
        PathCollection.__init__(self, resolution, paths)
    def paths_iter(self):
        """Iterates over the Path objects that make up this bus."""
        return iter(self._paths)
    def create_paths(self, delta_list, res):
        """Create the list of Path objects for this bus.

        Each bus point is offset perpendicularly by each wire's delta; at bends,
        the two offset segments meeting there are intersected to find the wire
        corner.

        Parameters
        ----------
        delta_list : List[int]
            perpendicular offset of each wire centerline, in resolution units.
        res : float
            the layout grid resolution.

        Returns
        -------
        paths : List[Path]
            the paths making up this bus.
        """
        npoints = len(self._points)
        npaths = len(self._widths)
        path_points = [[] for _ in range(npaths)]
        # add first point
        p0 = self._points[0, :]
        s0 = self._points[1, :] - p0
        # normalize to a unit step direction (components in {-1, 0, 1})
        s0 //= np.amax(np.absolute(s0))
        s0_norm = np.linalg.norm(s0)
        # d0 is s0 rotated 90 degrees counter-clockwise (perpendicular direction)
        d0 = np.array([-s0[1], s0[0]])
        for path, delta in zip(path_points, delta_list):
            tmp = p0 + d0 * int(round(delta / s0_norm))
            path.append((tmp[0], tmp[1]))
        # add intermediate points
        for last_idx in range(2, npoints):
            p1 = self._points[last_idx - 1, :]
            p0 = self._points[last_idx - 2, :]
            s0 = p1 - p0
            s1 = self._points[last_idx, :] - p1
            s0 //= np.amax(np.absolute(s0))
            s1 //= np.amax(np.absolute(s1))
            s0_norm = np.linalg.norm(s0)
            s1_norm = np.linalg.norm(s1)
            dir0 = np.array([-s0[1], s0[0]])
            dir1 = np.array([-s1[1], s1[0]])
            for path, delta in zip(path_points, delta_list):
                # offset endpoints of the two segments meeting at this bend
                d0 = p0 + dir0 * int(round(delta / s0_norm))
                d1 = p1 + dir1 * int(round(delta / s1_norm))
                # solve for the intersection of the two offset lines
                a = np.array([[-s1[1], s1[0]],
                              [s0[1], s0[0]]], dtype=int) // (s0[1] * s1[0] - s0[0] * s1[1])
                sol = np.dot(a, d1 - d0)
                tmp = sol[0] * s0 + d0
                path.append((tmp[0], tmp[1]))
        # add last points
        p1 = self._points[-1, :]
        s0 = p1 - self._points[-2, :]
        s0 //= np.amax(np.absolute(s0))
        s0_norm = np.linalg.norm(s0)
        d0 = np.array([-s0[1], s0[0]])
        for path, delta in zip(path_points, delta_list):
            tmp = p1 + d0 * int(round(delta / s0_norm))
            path.append((tmp[0], tmp[1]))
        paths = [Path(res, self._layer, w, pp, end_style=self._end_style,
                      join_style='round', unit_mode=True)
                 for w, pp in zip(self._widths, path_points)]
        return paths
class Polygon(Figure):
    """A layout polygon object.
    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    layer : Union[str, Tuple[str, str]]
        the layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        the points defining the polygon.
    unit_mode : bool
        True if the points are given in resolution units.
    """
    def __init__(self,
                 resolution,  # type: float
                 layer,  # type: Union[str, Tuple[str, str]]
                 points,  # type: List[Tuple[Union[float, int], Union[float, int]]]
                 unit_mode=False,  # type: bool
                 ):
        # type: (...) -> None
        Figure.__init__(self, resolution)
        # python 2/3 compatibility: convert raw bytes to string.
        layer = bag.io.fix_string(layer)
        if isinstance(layer, str):
            layer = (layer, 'drawing')
        self._layer = layer
        if not unit_mode:
            # round to the nearest grid point; plain astype(int) truncates toward
            # zero and could shift vertices by one unit (e.g. 0.3 / 0.1 -> 2.999...),
            # which is inconsistent with the int(round(...)) used elsewhere.
            self._points = np.round(np.array(points) / resolution).astype(int)
        else:
            self._points = np.array(points, dtype=int)
    @property
    def layer(self):
        # type: () -> str
        """The polygon layer."""
        return self._layer
    @property
    def points(self):
        """The polygon vertices, in layout units."""
        return [(self._points[idx][0] * self._res, self._points[idx][1] * self._res)
                for idx in range(self._points.shape[0])]
    @property
    def points_unit(self):
        """The polygon vertices, in resolution units."""
        return [(self._points[idx][0], self._points[idx][1])
                for idx in range(self._points.shape[0])]
    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this polygon."""
        content = dict(layer=self.layer,
                       points=self.points,
                       )
        return content
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this polygon by the given amount."""
        if not unit_mode:
            dx = int(round(dx / self._res))
            dy = int(round(dy / self._res))
        self._points += np.array([dx, dy])
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        res = self.resolution
        if unit_mode:
            dx, dy = loc
        else:
            dx = int(round(loc[0] / res))
            dy = int(round(loc[1] / res))
        dvec = np.array([dx, dy])
        # 2x2 rotation/mirror matrix for the given orientation
        mat = transform_table[orient]
        new_points = np.dot(mat, self._points.T).T + dvec
        if not copy:
            ans = self
        else:
            ans = deepcopy(self)
        ans._points = new_points
        return ans
class Blockage(Polygon):
    """A blockage object.

    Subclasses Polygon for code reuse.

    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    block_type : str
        the blockage type.  Currently supports 'routing' and 'placement'.
    block_layer : str
        the blockage layer.  This value is ignored if blockage type is 'placement'.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        the points defining the blockage.
    unit_mode : bool
        True if the points are given in resolution units.
    """
    def __init__(self, resolution, block_type, block_layer, points, unit_mode=False):
        # type: (float, str, str, List[Tuple[Union[float, int], Union[float, int]]], bool) -> None
        Polygon.__init__(self, resolution, block_layer, points, unit_mode=unit_mode)
        self._block_layer = block_layer
        self._type = block_type

    @property
    def layer(self):
        """The blockage layer."""
        return self._block_layer

    @property
    def type(self):
        # type: () -> str
        """The blockage type."""
        return self._type

    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this blockage."""
        return dict(layer=self.layer, btype=self.type, points=self.points)
class Boundary(Polygon):
    """A boundary object.

    Subclasses Polygon for code reuse.

    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    boundary_type : str
        the boundary type.  Currently supports 'PR', 'snap', and 'area'.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        the points defining the boundary.
    unit_mode : bool
        True if the points are given in resolution units.
    """
    def __init__(self, resolution, boundary_type, points, unit_mode=False):
        # type: (float, str, List[Tuple[Union[float, int], Union[float, int]]], bool) -> None
        # boundaries carry no real layer; pass an empty (layer, purpose) pair
        Polygon.__init__(self, resolution, ('', ''), points, unit_mode=unit_mode)
        self._type = boundary_type

    @property
    def type(self):
        # type: () -> str
        """The boundary type."""
        return self._type

    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this boundary."""
        return dict(btype=self.type, points=self.points)
class ViaInfo(dict):
    """A dictionary that represents a layout via."""

    # required keyword arguments, in insertion order
    param_list = ['id', 'loc', 'orient', 'num_rows', 'num_cols', 'sp_rows', 'sp_cols',
                  'enc1', 'enc2']

    def __init__(self, res, **kwargs):
        dict.__init__(self, ((name, kwargs[name]) for name in self.param_list))
        # optional parameters are stored only when explicitly provided
        for opt_name in ('cut_width', 'cut_height', 'arr_nx', 'arr_ny', 'arr_spx', 'arr_spy'):
            if opt_name in kwargs:
                self[opt_name] = kwargs[opt_name]
        self._resolution = res

    @property
    def id(self):
        # type: () -> str
        return self['id']

    @property
    def loc(self):
        # type: () -> Tuple[float, float]
        return self['loc'][0], self['loc'][1]

    @property
    def orient(self):
        # type: () -> str
        return self['orient']

    @property
    def num_rows(self):
        # type: () -> int
        return self['num_rows']

    @property
    def num_cols(self):
        # type: () -> int
        return self['num_cols']

    @property
    def sp_rows(self):
        # type: () -> float
        return self['sp_rows']

    @property
    def sp_cols(self):
        # type: () -> float
        return self['sp_cols']

    @property
    def enc1(self):
        # type: () -> Tuple[float, float, float, float]
        return tuple(self['enc1'][:4])

    @property
    def enc2(self):
        # type: () -> Tuple[float, float, float, float]
        return tuple(self['enc2'][:4])

    @property
    def cut_width(self):
        # type: () -> float
        return self.get('cut_width', -1)

    @property
    def cut_height(self):
        # type: () -> float
        return self.get('cut_height', -1)

    @property
    def arr_nx(self):
        # type: () -> int
        return self.get('arr_nx', 1)

    @property
    def arr_ny(self):
        # type: () -> int
        return self.get('arr_ny', 1)

    @property
    def arr_spx(self):
        # type: () -> float
        return self.get('arr_spx', 0)

    @property
    def arr_spy(self):
        # type: () -> float
        return self.get('arr_spy', 0)

    def move_by(self, dx=0, dy=0):
        # type: (float, float) -> None
        """Move this via by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        """
        res = self._resolution
        x0, y0 = self.loc
        # snap the shifted location back onto the resolution grid
        self['loc'] = [round((x0 + dx) / res) * res,
                       round((y0 + dy) / res) * res]
class Via(Arrayable):
    """A layout via, with optional arraying parameters.
    Parameters
    ----------
    tech : bag.layout.core.TechInfo
        the technology class used to calculate via information.
    bbox : bag.layout.util.BBox or bag.layout.util.BBoxArray
        the via bounding box, not including extensions.
        If this is a BBoxArray, the BBoxArray's arraying parameters are used.
    bot_layer : str or (str, str)
        the bottom layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    top_layer : str or (str, str)
        the top layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    bot_dir : str
        the bottom layer extension direction. Either 'x' or 'y'.
    nx : int
        arraying parameter. Number of columns.
    ny : int
        arraying parameter. Number of rows.
    spx : float
        arraying parameter. Column pitch.
    spy : float
        arraying parameter. Row pitch.
    extend : bool
        True if via extension can be drawn outside of bounding box.
    top_dir : Optional[str]
        top layer extension direction. Can force to extend in same direction as bottom.
    unit_mode : bool
        True if array pitches are given in resolution units.
    Raises
    ------
    ValueError
        if the technology cannot produce a via for the given bounding box.
    """
    def __init__(self, tech, bbox, bot_layer, top_layer, bot_dir,
                 nx=1, ny=1, spx=0, spy=0, extend=True, top_dir=None, unit_mode=False):
        if isinstance(bbox, BBoxArray):
            # a BBoxArray carries its own arraying parameters; they take
            # precedence over the nx/ny/spx/spy arguments
            self._bbox = bbox.base
            Arrayable.__init__(self, tech.resolution, nx=bbox.nx, ny=bbox.ny,
                               spx=bbox.spx_unit, spy=bbox.spy_unit, unit_mode=True)
        else:
            self._bbox = bbox
            Arrayable.__init__(self, tech.resolution, nx=nx, ny=ny, spx=spx, spy=spy,
                               unit_mode=unit_mode)
        # python 2/3 compatibility: convert raw bytes to string.
        bot_layer = bag.io.fix_string(bot_layer)
        top_layer = bag.io.fix_string(top_layer)
        # default the purpose name to 'drawing' when only a layer name is given
        if isinstance(bot_layer, str):
            bot_layer = (bot_layer, 'drawing')
        if isinstance(top_layer, str):
            top_layer = (top_layer, 'drawing')
        self._tech = tech
        self._bot_layer = bot_layer[0], bot_layer[1]
        self._top_layer = top_layer[0], top_layer[1]
        self._bot_dir = bot_dir
        self._top_dir = top_dir
        self._extend = extend
        # ask the technology object how to draw this via; None means no legal
        # via fits in the given bounding box
        self._info = self._tech.get_via_info(self._bbox, bot_layer, top_layer, bot_dir,
                                             top_dir=top_dir, extend=extend)
        if self._info is None:
            raise ValueError('Cannot make via with bounding box %s' % self._bbox)
    def _update(self):
        """Recompute via parameters from the current geometry/direction settings."""
        self._info = self._tech.get_via_info(self.bbox, self.bot_layer, self.top_layer,
                                             self.bottom_direction, top_dir=self.top_direction,
                                             extend=self.extend)
    @property
    def top_box(self):
        # type: () -> BBox
        """the top via layer bounding box."""
        return self._info['top_box']
    @property
    def bottom_box(self):
        # type: () -> BBox
        """the bottom via layer bounding box."""
        return self._info['bot_box']
    @property
    def bot_layer(self):
        """The bottom via (layer, purpose) pair."""
        return self._bot_layer
    @property
    def top_layer(self):
        """The top via (layer, purpose) pair."""
        return self._top_layer
    @property
    def bottom_direction(self):
        """the bottom via extension direction."""
        return self._bot_dir
    @bottom_direction.setter
    def bottom_direction(self, new_bot_dir):
        """Sets the bottom via extension direction; redraws the via."""
        self.check_destroyed()
        self._bot_dir = new_bot_dir
        self._update()
    @property
    def top_direction(self):
        """the top via extension direction."""
        # default: perpendicular to the bottom direction unless explicitly set
        if not self._top_dir:
            return 'x' if self._bot_dir == 'y' else 'y'
        return self._top_dir
    @top_direction.setter
    def top_direction(self, new_top_dir):
        """Sets the top via extension direction; redraws the via."""
        self.check_destroyed()
        self._top_dir = new_top_dir
        self._update()
    @property
    def extend(self):
        """True if via extension can grow beyond bounding box."""
        return self._extend
    @extend.setter
    def extend(self, new_val):
        # NOTE: does not call _update(); takes effect on the next redraw
        self._extend = new_val
    @property
    def bbox(self):
        """The via bounding box not including extensions."""
        return self._bbox
    @property
    def bbox_array(self):
        """The via bounding box array, not including extensions.
        Returns
        -------
        barr : :class:`bag.layout.util.BBoxArray`
            the BBoxArray representing this (Arrayed) via.
        """
        return BBoxArray(self._bbox, nx=self.nx, ny=self.ny, spx=self.spx_unit,
                         spy=self.spy_unit, unit_mode=True)
    @bbox.setter
    def bbox(self, new_bbox):
        """Sets the via bounding box. Will redraw the via."""
        self.check_destroyed()
        if not new_bbox.is_physical():
            raise ValueError('Bounding box %s is not physical' % new_bbox)
        self._bbox = new_bbox
        self._update()
    @property
    def content(self):
        """A dictionary representation of this via."""
        via_params = self._info['params']
        content = ViaInfo(self._tech.resolution, **via_params)
        # only emit arraying parameters when the via is actually arrayed
        if self.nx > 1 or self.ny > 1:
            content['arr_nx'] = self.nx
            content['arr_ny'] = self.ny
            content['arr_spx'] = self.spx
            content['arr_spy'] = self.spy
        return content
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this via by the given amount.
        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        # shift the outline and the cached extension boxes in lockstep, so no
        # recomputation through the technology object is needed
        self._bbox = self._bbox.move_by(dx=dx, dy=dy, unit_mode=unit_mode)
        self._info['top_box'] = self._info['top_box'].move_by(dx=dx, dy=dy, unit_mode=unit_mode)
        self._info['bot_box'] = self._info['bot_box'].move_by(dx=dx, dy=dy, unit_mode=unit_mode)
        self._info['params']['loc'] = [self._bbox.xc, self._bbox.yc]
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure; modifies in place unless copy=True."""
        new_box = self._bbox.transform(loc=loc, orient=orient, unit_mode=unit_mode)
        if copy:
            # build a brand-new Via; its extension info is recomputed in __init__
            return Via(self._tech, new_box, self._bot_layer, self._top_layer, self._bot_dir,
                       nx=self.nx, ny=self.ny, spx=self.spx_unit, spy=self.spy_unit,
                       unit_mode=True)
        else:
            self._bbox = new_box
            self._info['top_box'] = self._info['top_box'].transform(loc=loc, orient=orient,
                                                                    unit_mode=unit_mode)
            self._info['bot_box'] = self._info['bot_box'].transform(loc=loc, orient=orient,
                                                                    unit_mode=unit_mode)
            self._info['params']['loc'] = [self._bbox.xc, self._bbox.yc]
class PinInfo(dict):
    """A dictionary that represents a layout pin."""

    # required keyword arguments, in insertion order
    param_list = ['net_name', 'pin_name', 'label', 'layer', 'bbox', 'make_rect']

    def __init__(self, res, **kwargs):
        dict.__init__(self, ((name, kwargs[name]) for name in self.param_list))
        self._resolution = res

    @property
    def net_name(self):
        # type: () -> str
        return self['net_name']

    @property
    def pin_name(self):
        # type: () -> str
        return self['pin_name']

    @property
    def label(self):
        # type: () -> str
        return self['label']

    @property
    def layer(self):
        # type: () -> Tuple[str, str]
        return self['layer'][0], self['layer'][1]

    @property
    def bbox(self):
        # type: () -> BBox
        # stored as [[left, bottom], [right, top]]
        corners = self['bbox']
        return BBox(corners[0][0], corners[0][1], corners[1][0], corners[1][1],
                    self._resolution)

    @property
    def make_rect(self):
        # type: () -> bool
        return self['make_rect']

    def move_by(self, dx=0, dy=0):
        # type: (float, float) -> None
        """Move this pin by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        """
        shifted = self.bbox.move_by(dx=dx, dy=dy)
        self['bbox'] = [[shifted.left, shifted.bottom], [shifted.right, shifted.top]]
| [
"numpy.absolute",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot"
] | [((37893, 37921), 'numpy.array', 'np.array', (['pt_list'], {'dtype': 'int'}), '(pt_list, dtype=int)\n', (37901, 37921), True, 'import numpy as np\n'), ((41007, 41025), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (41015, 41025), True, 'import numpy as np\n'), ((41396, 41414), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (41404, 41414), True, 'import numpy as np\n'), ((44365, 44392), 'numpy.array', 'np.array', (['points'], {'dtype': 'int'}), '(points, dtype=int)\n', (44373, 44392), True, 'import numpy as np\n'), ((45415, 45433), 'numpy.linalg.norm', 'np.linalg.norm', (['s0'], {}), '(s0)\n', (45429, 45433), True, 'import numpy as np\n'), ((45447, 45472), 'numpy.array', 'np.array', (['[-s0[1], s0[0]]'], {}), '([-s0[1], s0[0]])\n', (45455, 45472), True, 'import numpy as np\n'), ((46738, 46756), 'numpy.linalg.norm', 'np.linalg.norm', (['s0'], {}), '(s0)\n', (46752, 46756), True, 'import numpy as np\n'), ((46770, 46795), 'numpy.array', 'np.array', (['[-s0[1], s0[0]]'], {}), '([-s0[1], s0[0]])\n', (46778, 46795), True, 'import numpy as np\n'), ((49362, 49380), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (49370, 49380), True, 'import numpy as np\n'), ((49751, 49769), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (49759, 49769), True, 'import numpy as np\n'), ((30878, 30892), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (30886, 30892), False, 'from copy import deepcopy\n'), ((35657, 35671), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (35665, 35671), False, 'from copy import deepcopy\n'), ((41588, 41602), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (41596, 41602), False, 'from copy import deepcopy\n'), ((42759, 42773), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (42767, 42773), False, 'from copy import deepcopy\n'), ((45380, 45395), 'numpy.absolute', 'np.absolute', (['s0'], {}), '(s0)\n', (45391, 45395), True, 'import numpy as np\n'), ((45983, 
46001), 'numpy.linalg.norm', 'np.linalg.norm', (['s0'], {}), '(s0)\n', (45997, 46001), True, 'import numpy as np\n'), ((46024, 46042), 'numpy.linalg.norm', 'np.linalg.norm', (['s1'], {}), '(s1)\n', (46038, 46042), True, 'import numpy as np\n'), ((46062, 46087), 'numpy.array', 'np.array', (['[-s0[1], s0[0]]'], {}), '([-s0[1], s0[0]])\n', (46070, 46087), True, 'import numpy as np\n'), ((46107, 46132), 'numpy.array', 'np.array', (['[-s1[1], s1[0]]'], {}), '([-s1[1], s1[0]])\n', (46115, 46132), True, 'import numpy as np\n'), ((46703, 46718), 'numpy.absolute', 'np.absolute', (['s0'], {}), '(s0)\n', (46714, 46718), True, 'import numpy as np\n'), ((48371, 48398), 'numpy.array', 'np.array', (['points'], {'dtype': 'int'}), '(points, dtype=int)\n', (48379, 48398), True, 'import numpy as np\n'), ((49943, 49957), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (49951, 49957), False, 'from copy import deepcopy\n'), ((41474, 41501), 'numpy.dot', 'np.dot', (['mat', 'self._points.T'], {}), '(mat, self._points.T)\n', (41480, 41501), True, 'import numpy as np\n'), ((45900, 45915), 'numpy.absolute', 'np.absolute', (['s0'], {}), '(s0)\n', (45911, 45915), True, 'import numpy as np\n'), ((45944, 45959), 'numpy.absolute', 'np.absolute', (['s1'], {}), '(s1)\n', (45955, 45959), True, 'import numpy as np\n'), ((46478, 46496), 'numpy.dot', 'np.dot', (['a', '(d1 - d0)'], {}), '(a, d1 - d0)\n', (46484, 46496), True, 'import numpy as np\n'), ((48248, 48264), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (48256, 48264), True, 'import numpy as np\n'), ((49829, 49856), 'numpy.dot', 'np.dot', (['mat', 'self._points.T'], {}), '(mat, self._points.T)\n', (49835, 49856), True, 'import numpy as np\n'), ((46336, 46390), 'numpy.array', 'np.array', (['[[-s1[1], s1[0]], [s0[1], s0[0]]]'], {'dtype': 'int'}), '([[-s1[1], s1[0]], [s0[1], s0[0]]], dtype=int)\n', (46344, 46390), True, 'import numpy as np\n')] |
import numpy as np
def calculate(list):
    """Compute statistics of a 9-element sequence viewed as a 3x3 matrix.

    Parameters
    ----------
    list : sequence of 9 numbers
        (the name shadows the builtin, but is kept for backward compatibility
        with keyword callers)

    Returns
    -------
    dict
        keys 'mean', 'variance', 'standard deviation', 'max', 'min', 'sum';
        each value is [per-column stats, per-row stats, flattened stat].

    Raises
    ------
    ValueError
        if the input does not contain exactly nine numbers.
    """
    # was `< 9`: a list with MORE than nine numbers fell through to an opaque
    # reshape error instead of this message
    if len(list) != 9:
        raise ValueError('List must contain nine numbers.')
    arr = np.asarray(list).reshape(3, 3)
    calculations = {}
    # one pass per statistic replaces six copy-pasted blocks; insertion order
    # matches the original dict layout
    for name, func in (('mean', np.mean), ('variance', np.var),
                       ('standard deviation', np.std), ('max', np.max),
                       ('min', np.min), ('sum', np.sum)):
        calculations[name] = [func(arr, axis=0).tolist(),
                              func(arr, axis=1).tolist(),
                              func(arr)]
    return calculations
| [
"numpy.sum",
"numpy.std",
"numpy.asarray",
"numpy.max",
"numpy.mean",
"numpy.min",
"numpy.var"
] | [((138, 154), 'numpy.asarray', 'np.asarray', (['list'], {}), '(list)\n', (148, 154), True, 'import numpy as np\n'), ((289, 309), 'numpy.mean', 'np.mean', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (296, 309), True, 'import numpy as np\n'), ((342, 362), 'numpy.mean', 'np.mean', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (349, 362), True, 'import numpy as np\n'), ((447, 466), 'numpy.var', 'np.var', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (453, 466), True, 'import numpy as np\n'), ((499, 518), 'numpy.var', 'np.var', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (505, 518), True, 'import numpy as np\n'), ((606, 625), 'numpy.std', 'np.std', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (612, 625), True, 'import numpy as np\n'), ((658, 677), 'numpy.std', 'np.std', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (664, 677), True, 'import numpy as np\n'), ((775, 794), 'numpy.max', 'np.max', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (781, 794), True, 'import numpy as np\n'), ((827, 846), 'numpy.max', 'np.max', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (833, 846), True, 'import numpy as np\n'), ((929, 948), 'numpy.min', 'np.min', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (935, 948), True, 'import numpy as np\n'), ((981, 1000), 'numpy.min', 'np.min', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (987, 1000), True, 'import numpy as np\n'), ((1083, 1102), 'numpy.sum', 'np.sum', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1089, 1102), True, 'import numpy as np\n'), ((1135, 1154), 'numpy.sum', 'np.sum', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (1141, 1154), True, 'import numpy as np\n'), ((424, 436), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (431, 436), True, 'import numpy as np\n'), ((584, 595), 'numpy.var', 'np.var', (['arr'], {}), '(arr)\n', (590, 595), True, 'import numpy as np\n'), ((753, 764), 'numpy.std', 'np.std', (['arr'], {}), '(arr)\n', (759, 764), True, 'import numpy as np\n'), ((907, 918), 'numpy.max', 'np.max', 
(['arr'], {}), '(arr)\n', (913, 918), True, 'import numpy as np\n'), ((1061, 1072), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (1067, 1072), True, 'import numpy as np\n'), ((1215, 1226), 'numpy.sum', 'np.sum', (['arr'], {}), '(arr)\n', (1221, 1226), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from requests.exceptions import HTTPError
import xarray as xr
from toffy.mibitracker_utils import MibiRequests
from toffy import qc_comp
from toffy import settings
import ark.utils.io_utils as io_utils
import ark.utils.misc_utils as misc_utils
import ark.utils.test_utils as test_utils
import os
from pathlib import Path
import pytest
import tempfile
# shorthand alias so the parametrized tests below read more compactly
parametrize = pytest.mark.parametrize
# the full FOV name list for the test run; RUN_POINT_IDS are presumably the
# matching MIBItracker point ids for that run — verify against the server
RUN_POINT_NAMES = ['Point%d' % i for i in range(1, 13)]
RUN_POINT_IDS = list(range(661, 673))
# NOTE: all fovs and all channels will be tested in the example_qc_metric_eval notebook test
# parametrization tuples: (fovs, channels, img_sub_folder, expected names, expected ids)
FOVS_CHANS_TEST_MIBI = [
    (None, ['CCL8', 'CD11b'], None, RUN_POINT_NAMES, RUN_POINT_IDS),
    (None, ['CCL8', 'CD11b'], "TIFs", RUN_POINT_NAMES, RUN_POINT_IDS),
    (['Point1'], None, None, RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1]),
    (['Point1'], None, "TIFs", RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1]),
    (['Point1'], ['CCL8', 'CD11b'], None, RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1]),
    (['Point1'], ['CCL8', 'CD11b'], "TIFs", RUN_POINT_NAMES[0:1], RUN_POINT_IDS[0:1])
]
# parametrization tuples: (fovs, channels, gaussian_blur flag)
FOVS_CHANS_TEST_QC = [
    (None, None, False),
    (None, None, True),
    (['fov0', 'fov1'], None, False),
    (['fov0', 'fov1'], None, True),
    (None, ['chan0', 'chan1'], False),
    (None, ['chan0', 'chan1'], True),
    (['fov0', 'fov1'], ['chan0', 'chan1'], False),
    (['fov0', 'fov1'], ['chan0', 'chan1'], True)
]
# MIBItracker credentials/run identifiers (values anonymized in this dump)
MIBITRACKER_EMAIL = '<EMAIL>'
MIBITRACKER_PASSWORD = '<PASSWORD>!?'
MIBITRACKER_RUN_NAME = '191008_JG85b'
MIBITRACKER_RUN_LABEL = 'JG85_Run2'
def test_create_mibitracker_request_helper():
    """Bad credentials yield None; good credentials yield a MibiRequests object."""
    # error check: bad email and/or password provided
    mr = qc_comp.create_mibitracker_request_helper('bad_email', 'bad_password')
    assert mr is None
    # test creation works; isinstance is the idiomatic type check
    # (type(...) == ... would reject subclasses)
    mr = qc_comp.create_mibitracker_request_helper(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD)
    assert isinstance(mr, MibiRequests)
@pytest.mark.parametrize(
    "test_fovs,test_chans,test_sub_folder,actual_points,actual_ids",
    FOVS_CHANS_TEST_MIBI
)
def test_download_mibitracker_data(test_fovs, test_chans, test_sub_folder,
                                   actual_points, actual_ids):
    """End-to-end download test against the live MIBItracker server.

    Checks input validation, the overwrite_tiff_dir behavior, the on-disk
    fov/channel directory layout, and the returned run order.
    NOTE: requires network access and valid MIBITRACKER_* credentials.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # error check: bad base_dir provided
        with pytest.raises(FileNotFoundError):
            qc_comp.download_mibitracker_data('', '', '', '', 'bad_base_dir', '', '')
        # error check: bad run_name and/or run_label provided
        with pytest.raises(ValueError):
            qc_comp.download_mibitracker_data(
                MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD, 'bad_run_name', 'bad_run_label',
                temp_dir, '', ''
            )
        # bad fovs provided
        with pytest.raises(ValueError):
            qc_comp.download_mibitracker_data(
                MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
                MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
                temp_dir, '', '', fovs=['Point0', 'Point1']
            )
        # bad channels provided
        with pytest.raises(ValueError):
            qc_comp.download_mibitracker_data(
                MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
                MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
                temp_dir, '', '', channels=['B', 'C']
            )
        # ensure test to remove tiff_dir if it already exists runs
        os.mkdir(os.path.join(temp_dir, 'sample_tiff_dir'))
        # error check: tiff_dir that already exists provided with overwrite_tiff_dir=False
        with pytest.raises(ValueError):
            qc_comp.download_mibitracker_data(
                MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
                MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
                temp_dir, 'sample_tiff_dir', overwrite_tiff_dir=False,
                img_sub_folder=test_sub_folder, fovs=test_fovs, channels=test_chans
            )
        # run the data
        run_order = qc_comp.download_mibitracker_data(
            MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,
            MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL,
            temp_dir, 'sample_tiff_dir', overwrite_tiff_dir=True,
            img_sub_folder=test_sub_folder, fovs=test_fovs, channels=test_chans
        )
        # for testing purposes, set test_fovs and test_chans to all fovs and channels
        # if they're set to None
        if test_fovs is None:
            test_fovs = ['Point%d' % i for i in np.arange(1, 13)]
        if test_chans is None:
            # the full channel panel of the 191008_JG85b run
            test_chans = [
                'CD115', 'C', 'Au', 'CCL8', 'CD11c', 'Ca', 'Background',
                'CD11b', 'CD192', 'CD19', 'CD206', 'CD25', 'CD4', 'CD45.1',
                'CD3', 'CD31', 'CD49b', 'CD68', 'CD45.2', 'FceRI', 'DNA', 'CD8',
                'F4-80', 'Fe', 'IL-1B', 'Ly-6C', 'FRB', 'Lyve1', 'Ly-6G', 'MHCII',
                'Na', 'Si', 'SMA', 'P', 'Ta', 'TREM2'
            ]
        # set the sub folder to a blank string if None
        if test_sub_folder is None:
            test_sub_folder = ""
        # get the contents of tiff_dir
        tiff_dir_contents = os.listdir(os.path.join(temp_dir, 'sample_tiff_dir'))
        # assert all the fovs are contained in the dir
        tiff_dir_fovs = [d for d in tiff_dir_contents if
                         os.path.isdir(os.path.join(temp_dir, 'sample_tiff_dir', d))]
        misc_utils.verify_same_elements(
            created_fov_dirs=tiff_dir_fovs,
            provided_fov_dirs=test_fovs
        )
        # assert for each fov the channels created are correct
        for fov in tiff_dir_fovs:
            # list all the files in the fov folder (and sub folder)
            # remove file extensions so raw channel names are extracted
            channel_files = io_utils.remove_file_extensions(os.listdir(
                os.path.join(temp_dir, 'sample_tiff_dir', fov, test_sub_folder)
            ))
            # assert the channel names are the same
            misc_utils.verify_same_elements(
                create_channels=channel_files,
                provided_channels=test_chans
            )
        # assert that the run order created is correct for both points and ids
        run_fov_names = [ro[0] for ro in run_order]
        run_fov_ids = [ro[1] for ro in run_order]
        assert run_fov_names == actual_points
        assert run_fov_ids == actual_ids
def test_compute_nonzero_mean_intensity():
    """Nonzero mean is 0 for an all-zero image, else averages only nonzero pixels."""
    # all-zero image: defined to return 0
    assert qc_comp.compute_nonzero_mean_intensity(np.zeros((3, 3))) == 0
    # mixed image: (1 + 2 + 3 + 4 + 5) / 5 == 3
    mixed_img = np.array([[0, 1, 2], [3, 0, 0], [0, 4, 5]])
    assert qc_comp.compute_nonzero_mean_intensity(mixed_img) == 3
def test_compute_total_intensity():
    """Total intensity sums every pixel: 1 + 2 + 3 + 4 + 5 == 15."""
    img = np.array([[0, 1, 2], [3, 0, 0], [0, 4, 5]])
    assert qc_comp.compute_total_intensity(img) == 15
def test_compute_99_9_intensity():
    """The 99.9th-percentile intensity of the sample image is (approximately) its max."""
    img = np.array([[0, 1, 2], [3, 0, 0], [0, 4, 5]])
    assert np.allclose(qc_comp.compute_99_9_intensity(img), 5, rtol=0.01)
def test_sort_bin_file_fovs():
    """fovs sort numerically by fov index then scan index, with optional suffix stripping."""
    unsorted_fovs = [
        'fov-2-scan-2', 'fov-10-scan-1', 'fov-5-scan-3', 'fov-2-scan-10', 'fov-200-scan-4'
    ]
    expected_order = [
        'fov-2-scan-2', 'fov-2-scan-10', 'fov-5-scan-3', 'fov-10-scan-1', 'fov-200-scan-4'
    ]
    # no suffix handling needed
    assert qc_comp.sort_bin_file_fovs(unsorted_fovs) == expected_order
    # suffix present on only the first two fovs
    partially_suffixed = [
        f + '_suffix.csv' if i < 2 else f for i, f in enumerate(unsorted_fovs)
    ]
    assert qc_comp.sort_bin_file_fovs(partially_suffixed, suffix_ignore='_suffix.csv') == [
        'fov-2-scan-2_suffix.csv', 'fov-2-scan-10',
        'fov-5-scan-3', 'fov-10-scan-1_suffix.csv', 'fov-200-scan-4'
    ]
    # suffix present on every fov
    fully_suffixed = [f + '_suffix.csv' for f in unsorted_fovs]
    assert qc_comp.sort_bin_file_fovs(fully_suffixed, suffix_ignore='_suffix.csv') == [
        f + '_suffix.csv' for f in expected_order
    ]
# NOTE: we don't need to test iteration over multiple FOVs because
# test_compute_qc_metrics computes on 1 FOV at a time
@parametrize("gaussian_blur", [False, True])
@parametrize("bin_file_folder, fovs",
             [('moly', ['fov-1-scan-1']), ('tissue', ['fov-1-scan-1'])])
def test_compute_qc_metrics(gaussian_blur, bin_file_folder, fovs):
    """compute_qc_metrics validates its inputs and writes one csv per QC metric.

    Requires the sample .bin fixtures under tests/data/{moly,tissue}.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # define a sample panel, leave panel correctness/incorrectness test for mibi_bin_tools
        panel = pd.DataFrame([{
            'Mass': 89,
            'Target': 'SMA',
            'Start': 88.7,
            'Stop': 89.0,
        }])
        # write the panel to csv
        panel_path = os.path.join(temp_dir, 'sample_panel.csv')
        panel.to_csv(panel_path, index=False)
        # define the full path to the bin file folder
        bin_file_path = os.path.join(Path(__file__).parent, 'data', bin_file_folder)
        # NOTE: the original defined an unused qc_path local here; removed
        # bin folder error check
        with pytest.raises(FileNotFoundError):
            qc_comp.compute_qc_metrics(
                'bad_bin_path', fovs[0], panel_path, gaussian_blur
            )
        # panel file error check
        with pytest.raises(FileNotFoundError):
            qc_comp.compute_qc_metrics(
                bin_file_path, fovs[0], 'bad_panel_path', gaussian_blur
            )
        # fov error check
        with pytest.raises(FileNotFoundError):
            qc_comp.compute_qc_metrics(
                bin_file_path, 'bad_fov', panel_path, gaussian_blur
            )
        # run the computation; writes the per-metric csv files next to the bin files
        qc_comp.compute_qc_metrics(
            bin_file_path, fovs[0], panel_path, gaussian_blur
        )
        for ms, mc in zip(settings.QC_SUFFIXES, settings.QC_COLUMNS):
            # assert the file for this QC metric was created
            metric_path = os.path.join(bin_file_path, '%s_%s.csv' % (fovs[0], ms))
            assert os.path.exists(metric_path)
            # read the data for this QC metric
            metric_data = pd.read_csv(metric_path)
            # assert the column names are correct
            assert list(metric_data.columns.values) == ['fov', 'channel', mc]
            # assert the correct FOV was written
            assert list(metric_data['fov']) == [fovs[0]]
            # assert the correct channels were written
            assert list(metric_data['channel']) == ['SMA']
@parametrize('fovs', [['fov-1-scan-1'], ['fov-1-scan-1', 'fov-2-scan-1']])
def test_combine_qc_metrics(fovs):
    """combine_qc_metrics aggregates per-fov metric csvs into one combined csv per metric,
    ignoring unrelated files and pre-existing combined csvs.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # bin folder error check
        with pytest.raises(FileNotFoundError):
            qc_comp.combine_qc_metrics('bad_bin_path')
        # define a dummy list of channels
        chans = ['SMA', 'Vimentin', 'Au']
        # define a sample bin_file_path
        bin_file_path = os.path.join(temp_dir, 'sample_qc_dir')
        os.mkdir(bin_file_path)
        # put some random stuff in bin_file_path, test that this does not affect aggregation
        pd.DataFrame().to_csv(os.path.join(bin_file_path, 'random.csv'))
        Path(os.path.join(bin_file_path, 'random.txt')).touch()
        # the start value to generate dummy data from for each QC metric
        metric_start_vals = [2, 3, 4]
        # define sample .csv files for each QC metric
        for ms, mv, mc in zip(settings.QC_SUFFIXES, metric_start_vals, settings.QC_COLUMNS):
            # add existing combined .csv files, these should not be included in aggregation
            pd.DataFrame().to_csv(os.path.join(bin_file_path, 'combined_%s.csv' % ms))
            # create a sample dataframe for the QC metric
            for i, fov in enumerate(fovs):
                fov_qc_data = pd.DataFrame(
                    np.zeros((3, 3)), columns=['fov', 'channel', mc]
                )
                # write the FOV name in
                fov_qc_data['fov'] = fov
                # write the channel names in
                fov_qc_data['channel'] = chans
                # write the QC metric data in, we'll include different values for each
                fov_qc_data[mc] = np.arange(mv * (i + 1), mv * (i + 1) + 3)
                # write the dummy QC data
                fov_qc_data.to_csv(
                    os.path.join(bin_file_path, '%s_%s.csv' % (fov, ms)), index=False
                )
        # run the aggregation function
        qc_comp.combine_qc_metrics(bin_file_path)
        for ms, mv, mc in zip(settings.QC_SUFFIXES, metric_start_vals, settings.QC_COLUMNS):
            # assert the combined QC metric file was created
            combined_qc_path = os.path.join(bin_file_path, 'combined_%s.csv' % ms)
            assert os.path.exists(combined_qc_path)
            # read in the combined QC data
            metric_data = pd.read_csv(combined_qc_path)
            # assert the column names are correct
            assert list(metric_data.columns.values) == ['fov', 'channel', mc]
            # assert the correct FOVs are written
            assert list(metric_data['fov']) == list(np.repeat(fovs, len(chans)))
            # assert the correct channels are written
            # BUGFIX: was `list(metric_data['channel'] == chans * len(fovs))`,
            # which asserted a non-empty list of booleans and always passed
            assert list(metric_data['channel']) == chans * len(fovs)
            # assert the correct QC metric values were written
            qc_metric_vals = []
            for i in range(len(fovs)):
                qc_metric_vals.extend(range(mv * (i + 1), mv * (i + 1) + 3))
            assert list(metric_data[mc]) == qc_metric_vals
def test_visualize_qc_metrics():
    """Check that visualize_qc_metrics saves its barplot only when save_dir is given."""
    channel_names = ['chan0', 'chan1', 'chan2']
    # two FOVs are associated with each channel
    fov_batches = [['fov0', 'fov1'], ['fov2', 'fov3'], ['fov4', 'fov5']]

    # build a melted DataFrame: one random metric value per (fov, channel) pair
    sample_qc_metric_data = pd.DataFrame()
    for channel, fov_names in zip(channel_names, fov_batches):
        batch_df = pd.DataFrame(np.random.rand(len(fov_names)), columns=['sample_qc_metric'])
        batch_df['fov'] = fov_names
        batch_df['channel'] = channel
        sample_qc_metric_data = pd.concat([sample_qc_metric_data, batch_df])

    with tempfile.TemporaryDirectory() as temp_dir:
        plot_path = os.path.join(temp_dir, 'sample_qc_metric_barplot_stats.png')

        # without save_dir no file should be written
        qc_comp.visualize_qc_metrics(sample_qc_metric_data, 'sample_qc_metric')
        assert not os.path.exists(plot_path)

        # with save_dir the barplot should be saved
        qc_comp.visualize_qc_metrics(sample_qc_metric_data, 'sample_qc_metric', save_dir=temp_dir)
        assert os.path.exists(plot_path)
| [
"os.mkdir",
"toffy.qc_comp.visualize_qc_metrics",
"pandas.read_csv",
"numpy.allclose",
"toffy.qc_comp.download_mibitracker_data",
"pathlib.Path",
"toffy.qc_comp.compute_nonzero_mean_intensity",
"numpy.arange",
"pytest.mark.parametrize",
"os.path.join",
"pandas.DataFrame",
"tempfile.TemporaryDi... | [((1974, 2093), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_fovs,test_chans,test_sub_folder,actual_points,actual_ids"""', 'FOVS_CHANS_TEST_MIBI'], {}), "(\n 'test_fovs,test_chans,test_sub_folder,actual_points,actual_ids',\n FOVS_CHANS_TEST_MIBI)\n", (1997, 2093), False, 'import pytest\n'), ((1685, 1755), 'toffy.qc_comp.create_mibitracker_request_helper', 'qc_comp.create_mibitracker_request_helper', (['"""bad_email"""', '"""bad_password"""'], {}), "('bad_email', 'bad_password')\n", (1726, 1755), False, 'from toffy import qc_comp\n'), ((1852, 1938), 'toffy.qc_comp.create_mibitracker_request_helper', 'qc_comp.create_mibitracker_request_helper', (['MIBITRACKER_EMAIL', 'MIBITRACKER_PASSWORD'], {}), '(MIBITRACKER_EMAIL,\n MIBITRACKER_PASSWORD)\n', (1893, 1938), False, 'from toffy import qc_comp\n'), ((6512, 6528), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6520, 6528), True, 'import numpy as np\n'), ((6555, 6609), 'toffy.qc_comp.compute_nonzero_mean_intensity', 'qc_comp.compute_nonzero_mean_intensity', (['sample_img_arr'], {}), '(sample_img_arr)\n', (6593, 6609), False, 'from toffy import qc_comp\n'), ((6699, 6742), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 0, 0], [0, 4, 5]]'], {}), '([[0, 1, 2], [3, 0, 0], [0, 4, 5]])\n', (6707, 6742), True, 'import numpy as np\n'), ((6769, 6823), 'toffy.qc_comp.compute_nonzero_mean_intensity', 'qc_comp.compute_nonzero_mean_intensity', (['sample_img_arr'], {}), '(sample_img_arr)\n', (6807, 6823), False, 'from toffy import qc_comp\n'), ((6919, 6962), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 0, 0], [0, 4, 5]]'], {}), '([[0, 1, 2], [3, 0, 0], [0, 4, 5]])\n', (6927, 6962), True, 'import numpy as np\n'), ((6992, 7039), 'toffy.qc_comp.compute_total_intensity', 'qc_comp.compute_total_intensity', (['sample_img_arr'], {}), '(sample_img_arr)\n', (7023, 7039), False, 'from toffy import qc_comp\n'), ((7138, 7181), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 0, 0], [0, 4, 
5]]'], {}), '([[0, 1, 2], [3, 0, 0], [0, 4, 5]])\n', (7146, 7181), True, 'import numpy as np\n'), ((7210, 7256), 'toffy.qc_comp.compute_99_9_intensity', 'qc_comp.compute_99_9_intensity', (['sample_img_arr'], {}), '(sample_img_arr)\n', (7240, 7256), False, 'from toffy import qc_comp\n'), ((7268, 7316), 'numpy.allclose', 'np.allclose', (['sample_99_9_intensity', '(5)'], {'rtol': '(0.01)'}), '(sample_99_9_intensity, 5, rtol=0.01)\n', (7279, 7316), True, 'import numpy as np\n'), ((7520, 7556), 'toffy.qc_comp.sort_bin_file_fovs', 'qc_comp.sort_bin_file_fovs', (['fov_list'], {}), '(fov_list)\n', (7546, 7556), False, 'from toffy import qc_comp\n'), ((7859, 7936), 'toffy.qc_comp.sort_bin_file_fovs', 'qc_comp.sort_bin_file_fovs', (['fov_list_some_suffix'], {'suffix_ignore': '"""_suffix.csv"""'}), "(fov_list_some_suffix, suffix_ignore='_suffix.csv')\n", (7885, 7936), False, 'from toffy import qc_comp\n'), ((8220, 8296), 'toffy.qc_comp.sort_bin_file_fovs', 'qc_comp.sort_bin_file_fovs', (['fov_list_all_suffix'], {'suffix_ignore': '"""_suffix.csv"""'}), "(fov_list_all_suffix, suffix_ignore='_suffix.csv')\n", (8246, 8296), False, 'from toffy import qc_comp\n'), ((14437, 14451), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14449, 14451), True, 'import pandas as pd\n'), ((2242, 2271), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2269, 2271), False, 'import tempfile\n'), ((4002, 4257), 'toffy.qc_comp.download_mibitracker_data', 'qc_comp.download_mibitracker_data', (['MIBITRACKER_EMAIL', 'MIBITRACKER_PASSWORD', 'MIBITRACKER_RUN_NAME', 'MIBITRACKER_RUN_LABEL', 'temp_dir', '"""sample_tiff_dir"""'], {'overwrite_tiff_dir': '(True)', 'img_sub_folder': 'test_sub_folder', 'fovs': 'test_fovs', 'channels': 'test_chans'}), "(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,\n MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL, temp_dir,\n 'sample_tiff_dir', overwrite_tiff_dir=True, img_sub_folder=\n test_sub_folder, fovs=test_fovs, channels=test_chans)\n", (4035, 
4257), False, 'from toffy import qc_comp\n'), ((5413, 5509), 'ark.utils.misc_utils.verify_same_elements', 'misc_utils.verify_same_elements', ([], {'created_fov_dirs': 'tiff_dir_fovs', 'provided_fov_dirs': 'test_fovs'}), '(created_fov_dirs=tiff_dir_fovs,\n provided_fov_dirs=test_fovs)\n', (5444, 5509), True, 'import ark.utils.misc_utils as misc_utils\n'), ((8852, 8881), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (8879, 8881), False, 'import tempfile\n'), ((9006, 9080), 'pandas.DataFrame', 'pd.DataFrame', (["[{'Mass': 89, 'Target': 'SMA', 'Start': 88.7, 'Stop': 89.0}]"], {}), "([{'Mass': 89, 'Target': 'SMA', 'Start': 88.7, 'Stop': 89.0}])\n", (9018, 9080), True, 'import pandas as pd\n'), ((9195, 9237), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_panel.csv"""'], {}), "(temp_dir, 'sample_panel.csv')\n", (9207, 9237), False, 'import os\n'), ((9489, 9528), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_qc_dir"""'], {}), "(temp_dir, 'sample_qc_dir')\n", (9501, 9528), False, 'import os\n'), ((10215, 10292), 'toffy.qc_comp.compute_qc_metrics', 'qc_comp.compute_qc_metrics', (['bin_file_path', 'fovs[0]', 'panel_path', 'gaussian_blur'], {}), '(bin_file_path, fovs[0], panel_path, gaussian_blur)\n', (10241, 10292), False, 'from toffy import qc_comp\n'), ((11148, 11177), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (11175, 11177), False, 'import tempfile\n'), ((11476, 11515), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_qc_dir"""'], {}), "(temp_dir, 'sample_qc_dir')\n", (11488, 11515), False, 'import os\n'), ((11524, 11547), 'os.mkdir', 'os.mkdir', (['bin_file_path'], {}), '(bin_file_path)\n', (11532, 11547), False, 'import os\n'), ((13021, 13062), 'toffy.qc_comp.combine_qc_metrics', 'qc_comp.combine_qc_metrics', (['bin_file_path'], {}), '(bin_file_path)\n', (13047, 13062), False, 'from toffy import qc_comp\n'), ((14785, 14830), 'pandas.concat', 'pd.concat', 
(['[sample_qc_metric_data, chan_data]'], {}), '([sample_qc_metric_data, chan_data])\n', (14794, 14830), True, 'import pandas as pd\n'), ((14841, 14870), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (14868, 14870), False, 'import tempfile\n'), ((14922, 14993), 'toffy.qc_comp.visualize_qc_metrics', 'qc_comp.visualize_qc_metrics', (['sample_qc_metric_data', '"""sample_qc_metric"""'], {}), "(sample_qc_metric_data, 'sample_qc_metric')\n", (14950, 14993), False, 'from toffy import qc_comp\n'), ((15126, 15220), 'toffy.qc_comp.visualize_qc_metrics', 'qc_comp.visualize_qc_metrics', (['sample_qc_metric_data', '"""sample_qc_metric"""'], {'save_dir': 'temp_dir'}), "(sample_qc_metric_data, 'sample_qc_metric',\n save_dir=temp_dir)\n", (15154, 15220), False, 'from toffy import qc_comp\n'), ((2343, 2375), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (2356, 2375), False, 'import pytest\n'), ((2389, 2462), 'toffy.qc_comp.download_mibitracker_data', 'qc_comp.download_mibitracker_data', (['""""""', '""""""', '""""""', '""""""', '"""bad_base_dir"""', '""""""', '""""""'], {}), "('', '', '', '', 'bad_base_dir', '', '')\n", (2422, 2462), False, 'from toffy import qc_comp\n'), ((2539, 2564), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2552, 2564), False, 'import pytest\n'), ((2578, 2707), 'toffy.qc_comp.download_mibitracker_data', 'qc_comp.download_mibitracker_data', (['MIBITRACKER_EMAIL', 'MIBITRACKER_PASSWORD', '"""bad_run_name"""', '"""bad_run_label"""', 'temp_dir', '""""""', '""""""'], {}), "(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,\n 'bad_run_name', 'bad_run_label', temp_dir, '', '')\n", (2611, 2707), False, 'from toffy import qc_comp\n'), ((2792, 2817), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2805, 2817), False, 'import pytest\n'), ((2831, 3004), 'toffy.qc_comp.download_mibitracker_data', 'qc_comp.download_mibitracker_data', 
(['MIBITRACKER_EMAIL', 'MIBITRACKER_PASSWORD', 'MIBITRACKER_RUN_NAME', 'MIBITRACKER_RUN_LABEL', 'temp_dir', '""""""', '""""""'], {'fovs': "['Point0', 'Point1']"}), "(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,\n MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL, temp_dir, '', '', fovs=[\n 'Point0', 'Point1'])\n", (2864, 3004), False, 'from toffy import qc_comp\n'), ((3104, 3129), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3117, 3129), False, 'import pytest\n'), ((3143, 3310), 'toffy.qc_comp.download_mibitracker_data', 'qc_comp.download_mibitracker_data', (['MIBITRACKER_EMAIL', 'MIBITRACKER_PASSWORD', 'MIBITRACKER_RUN_NAME', 'MIBITRACKER_RUN_LABEL', 'temp_dir', '""""""', '""""""'], {'channels': "['B', 'C']"}), "(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,\n MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL, temp_dir, '', '', channels\n =['B', 'C'])\n", (3176, 3310), False, 'from toffy import qc_comp\n'), ((3449, 3490), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_tiff_dir"""'], {}), "(temp_dir, 'sample_tiff_dir')\n", (3461, 3490), False, 'import os\n'), ((3597, 3622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3610, 3622), False, 'import pytest\n'), ((3636, 3892), 'toffy.qc_comp.download_mibitracker_data', 'qc_comp.download_mibitracker_data', (['MIBITRACKER_EMAIL', 'MIBITRACKER_PASSWORD', 'MIBITRACKER_RUN_NAME', 'MIBITRACKER_RUN_LABEL', 'temp_dir', '"""sample_tiff_dir"""'], {'overwrite_tiff_dir': '(False)', 'img_sub_folder': 'test_sub_folder', 'fovs': 'test_fovs', 'channels': 'test_chans'}), "(MIBITRACKER_EMAIL, MIBITRACKER_PASSWORD,\n MIBITRACKER_RUN_NAME, MIBITRACKER_RUN_LABEL, temp_dir,\n 'sample_tiff_dir', overwrite_tiff_dir=False, img_sub_folder=\n test_sub_folder, fovs=test_fovs, channels=test_chans)\n", (3669, 3892), False, 'from toffy import qc_comp\n'), ((5163, 5204), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_tiff_dir"""'], {}), "(temp_dir, 'sample_tiff_dir')\n", (5175, 5204), False, 
'import os\n'), ((6010, 6106), 'ark.utils.misc_utils.verify_same_elements', 'misc_utils.verify_same_elements', ([], {'create_channels': 'channel_files', 'provided_channels': 'test_chans'}), '(create_channels=channel_files,\n provided_channels=test_chans)\n', (6041, 6106), True, 'import ark.utils.misc_utils as misc_utils\n'), ((9576, 9608), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (9589, 9608), False, 'import pytest\n'), ((9622, 9700), 'toffy.qc_comp.compute_qc_metrics', 'qc_comp.compute_qc_metrics', (['"""bad_bin_path"""', 'fovs[0]', 'panel_path', 'gaussian_blur'], {}), "('bad_bin_path', fovs[0], panel_path, gaussian_blur)\n", (9648, 9700), False, 'from toffy import qc_comp\n'), ((9778, 9810), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (9791, 9810), False, 'import pytest\n'), ((9824, 9911), 'toffy.qc_comp.compute_qc_metrics', 'qc_comp.compute_qc_metrics', (['bin_file_path', 'fovs[0]', '"""bad_panel_path"""', 'gaussian_blur'], {}), "(bin_file_path, fovs[0], 'bad_panel_path',\n gaussian_blur)\n", (9850, 9911), False, 'from toffy import qc_comp\n'), ((9978, 10010), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (9991, 10010), False, 'import pytest\n'), ((10024, 10103), 'toffy.qc_comp.compute_qc_metrics', 'qc_comp.compute_qc_metrics', (['bin_file_path', '"""bad_fov"""', 'panel_path', 'gaussian_blur'], {}), "(bin_file_path, 'bad_fov', panel_path, gaussian_blur)\n", (10050, 10103), False, 'from toffy import qc_comp\n'), ((10473, 10529), 'os.path.join', 'os.path.join', (['bin_file_path', "('%s_%s.csv' % (fovs[0], ms))"], {}), "(bin_file_path, '%s_%s.csv' % (fovs[0], ms))\n", (10485, 10529), False, 'import os\n'), ((10549, 10576), 'os.path.exists', 'os.path.exists', (['metric_path'], {}), '(metric_path)\n', (10563, 10576), False, 'import os\n'), ((10651, 10675), 'pandas.read_csv', 'pd.read_csv', (['metric_path'], {}), '(metric_path)\n', 
(10662, 10675), True, 'import pandas as pd\n'), ((11237, 11269), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (11250, 11269), False, 'import pytest\n'), ((11283, 11325), 'toffy.qc_comp.combine_qc_metrics', 'qc_comp.combine_qc_metrics', (['"""bad_bin_path"""'], {}), "('bad_bin_path')\n", (11309, 11325), False, 'from toffy import qc_comp\n'), ((11672, 11713), 'os.path.join', 'os.path.join', (['bin_file_path', '"""random.csv"""'], {}), "(bin_file_path, 'random.csv')\n", (11684, 11713), False, 'import os\n'), ((13249, 13300), 'os.path.join', 'os.path.join', (['bin_file_path', "('combined_%s.csv' % ms)"], {}), "(bin_file_path, 'combined_%s.csv' % ms)\n", (13261, 13300), False, 'import os\n'), ((13320, 13352), 'os.path.exists', 'os.path.exists', (['combined_qc_path'], {}), '(combined_qc_path)\n', (13334, 13352), False, 'import os\n'), ((13423, 13452), 'pandas.read_csv', 'pd.read_csv', (['combined_qc_path'], {}), '(combined_qc_path)\n', (13434, 13452), True, 'import pandas as pd\n'), ((15247, 15307), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_qc_metric_barplot_stats.png"""'], {}), "(temp_dir, 'sample_qc_metric_barplot_stats.png')\n", (15259, 15307), False, 'import os\n'), ((9376, 9390), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (9380, 9390), False, 'from pathlib import Path\n'), ((11650, 11664), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11662, 11664), True, 'import pandas as pd\n'), ((12165, 12216), 'os.path.join', 'os.path.join', (['bin_file_path', "('combined_%s.csv' % ms)"], {}), "(bin_file_path, 'combined_%s.csv' % ms)\n", (12177, 12216), False, 'import os\n'), ((12748, 12789), 'numpy.arange', 'np.arange', (['(mv * (i + 1))', '(mv * (i + 1) + 3)'], {}), '(mv * (i + 1), mv * (i + 1) + 3)\n', (12757, 12789), True, 'import numpy as np\n'), ((15028, 15088), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_qc_metric_barplot_stats.png"""'], {}), "(temp_dir, 
'sample_qc_metric_barplot_stats.png')\n", (15040, 15088), False, 'import os\n'), ((4501, 4517), 'numpy.arange', 'np.arange', (['(1)', '(13)'], {}), '(1, 13)\n', (4510, 4517), True, 'import numpy as np\n'), ((5358, 5402), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_tiff_dir"""', 'd'], {}), "(temp_dir, 'sample_tiff_dir', d)\n", (5370, 5402), False, 'import os\n'), ((5866, 5929), 'os.path.join', 'os.path.join', (['temp_dir', '"""sample_tiff_dir"""', 'fov', 'test_sub_folder'], {}), "(temp_dir, 'sample_tiff_dir', fov, test_sub_folder)\n", (5878, 5929), False, 'import os\n'), ((11728, 11769), 'os.path.join', 'os.path.join', (['bin_file_path', '"""random.txt"""'], {}), "(bin_file_path, 'random.txt')\n", (11740, 11769), False, 'import os\n'), ((12143, 12157), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12155, 12157), True, 'import pandas as pd\n'), ((12384, 12400), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (12392, 12400), True, 'import numpy as np\n'), ((12889, 12941), 'os.path.join', 'os.path.join', (['bin_file_path', "('%s_%s.csv' % (fov, ms))"], {}), "(bin_file_path, '%s_%s.csv' % (fov, ms))\n", (12901, 12941), False, 'import os\n')] |
"""Comparison of RBF and polynomial kernels for SVM"""
import numpy as np
from pmlb import classification_dataset_names, fetch_data
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import (GridSearchCV, cross_val_score,
train_test_split)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, SVR
# Polynomial-kernel SVM: tune the kernel degree, the independent term coef0
# and the regularization strength C via 5-fold CV on standardized features.
poly = GridSearchCV(
    make_pipeline(StandardScaler(), SVC()),
    {
        'svc__kernel': ['poly'],
        'svc__degree': [2, 3],
        'svc__coef0': np.logspace(-3, 3, 13),
        'svc__C': np.logspace(-7, 5, 13)
    },
    cv=5,
    n_jobs=-1
)
# RBF-kernel SVM: tune the kernel width gamma and C over the same grids.
rbf = GridSearchCV(
    make_pipeline(StandardScaler(), SVC()),
    {
        'svc__kernel': ['rbf'],
        'svc__gamma': np.logspace(-3, 3, 13),
        'svc__C': np.logspace(-7, 5, 13)
    },
    cv=5,
    n_jobs=-1
)
# Baseline: dummy classifiers provide a chance-level reference score.
dum = GridSearchCV(
    make_pipeline(StandardScaler(), DummyClassifier()),
    {
        'dummyclassifier__strategy': ['stratified', 'most_frequent', 'uniform']
    },
    cv=5,
    n_jobs=-1
)
# Evaluate every PMLB classification dataset, capped at n_max samples each.
n_max = 256
for dataset in classification_dataset_names:
    X, y = fetch_data(dataset, True)
    # when the dataset is larger than n_max, keep a random subset of its rows
    # (boolean mask preserves the original row order)
    if len(y) > n_max:
        chosen = np.random.permutation(len(y))[:n_max]
        keep = np.zeros(len(y), dtype=bool)
        keep[chosen] = True
        X = X[keep]
        y = y[keep]
    # outer 5-fold CV around each (inner) grid search
    pscores = cross_val_score(poly, X, y, cv=5, n_jobs=-1)
    rscores = cross_val_score(rbf, X, y, cv=5, n_jobs=-1)
    dscores = cross_val_score(dum, X, y, cv=5, n_jobs=-1)
    # report the mean CV accuracy of each model, rounded to two decimals
    entries = []
    for label, scores in zip(('RBF', 'Poly', 'Dummy'), (rscores, pscores, dscores)):
        entries.append("'%s': %s" % (label, np.round(np.mean(scores), 2)))
    print("[" + ", ".join(entries) + "],")
"sklearn.dummy.DummyClassifier",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.cross_val_score",
"numpy.logspace",
"pmlb.fetch_data",
"numpy.mean",
"sklearn.svm.SVC",
"numpy.round"
] | [((1183, 1208), 'pmlb.fetch_data', 'fetch_data', (['dataset', '(True)'], {}), '(dataset, True)\n', (1193, 1208), False, 'from pmlb import classification_dataset_names, fetch_data\n'), ((1433, 1477), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['poly', 'X', 'y'], {'cv': '(5)', 'n_jobs': '(-1)'}), '(poly, X, y, cv=5, n_jobs=-1)\n', (1448, 1477), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((1492, 1535), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['rbf', 'X', 'y'], {'cv': '(5)', 'n_jobs': '(-1)'}), '(rbf, X, y, cv=5, n_jobs=-1)\n', (1507, 1535), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((1550, 1593), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['dum', 'X', 'y'], {'cv': '(5)', 'n_jobs': '(-1)'}), '(dum, X, y, cv=5, n_jobs=-1)\n', (1565, 1593), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((478, 494), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (492, 494), False, 'from sklearn.preprocessing import StandardScaler\n'), ((496, 501), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (499, 501), False, 'from sklearn.svm import SVC, SVR\n'), ((596, 618), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', '(13)'], {}), '(-3, 3, 13)\n', (607, 618), True, 'import numpy as np\n'), ((638, 660), 'numpy.logspace', 'np.logspace', (['(-7)', '(5)', '(13)'], {}), '(-7, 5, 13)\n', (649, 660), True, 'import numpy as np\n'), ((733, 749), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (747, 749), False, 'from sklearn.preprocessing import StandardScaler\n'), ((751, 756), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (754, 756), False, 'from sklearn.svm import SVC, SVR\n'), ((819, 841), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', '(13)'], {}), '(-3, 3, 13)\n', (830, 841), True, 'import numpy as np\n'), ((861, 883), 
'numpy.logspace', 'np.logspace', (['(-7)', '(5)', '(13)'], {}), '(-7, 5, 13)\n', (872, 883), True, 'import numpy as np\n'), ((956, 972), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (970, 972), False, 'from sklearn.preprocessing import StandardScaler\n'), ((974, 991), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {}), '()\n', (989, 991), False, 'from sklearn.dummy import DummyClassifier, DummyRegressor\n'), ((1646, 1660), 'numpy.round', 'np.round', (['x', '(2)'], {}), '(x, 2)\n', (1654, 1660), True, 'import numpy as np\n'), ((1670, 1686), 'numpy.mean', 'np.mean', (['rscores'], {}), '(rscores)\n', (1677, 1686), True, 'import numpy as np\n'), ((1688, 1704), 'numpy.mean', 'np.mean', (['pscores'], {}), '(pscores)\n', (1695, 1704), True, 'import numpy as np\n'), ((1706, 1722), 'numpy.mean', 'np.mean', (['dscores'], {}), '(dscores)\n', (1713, 1722), True, 'import numpy as np\n')] |
import numpy as np
import sys
'''
v0.2 Nov. 23, 2017
- add test_circumcenterSphTri()
v0.1 Nov. 23, 2017
- add calc_xc()
- add calc_gamma()
- add calc_beta()
- add calc_denom()
- add calc_alpha()
- add calc_dotProduct_2d()
- add calc_verticesDistance()
- add circumcenterSphTri()
'''
def calc_dotProduct(x1, x2):
    """Dot product, scalar for 1-D inputs and row-wise for 2-D inputs.

    Args:
        x1, x2: arrays of identical shape; either 1-D vectors or 2-D
            arrays of shape (n, d) whose rows are paired up.

    Returns:
        A scalar for 1-D inputs, or a length-n array whose i-th entry is
        dot(x1[i], x2[i]) for 2-D inputs.

    Bug fix: the previous implementation looped over ``range(x1.ndim)``
    (the number of axes, always 2 for a 2-D array) instead of the number
    of rows, so only the first two row-dots were ever computed.
    """
    x1 = np.asarray(x1)
    x2 = np.asarray(x2)
    if x1.ndim == 1:
        return np.dot(x1, x2)
    # summing the elementwise product over the last axis yields one dot
    # product per row, for any number of rows
    return np.sum(x1 * x2, axis=-1)
def calc_verticesDistance(xs, ys, zs, tris, idx0, idx1):
    """Squared chord distance between two vertices of every triangle.

    For unit vectors the squared Euclidean distance equals
    2*(1 - cos(theta)), with the cosine given by the dot product of the
    vertices in columns ``idx0`` and ``idx1`` of ``tris``.

    Returns a list with one value per triangle.
    """
    distances = []
    for a, b in zip(tris[:, idx0], tris[:, idx1]):
        cos_angle = xs[a] * xs[b] + ys[a] * ys[b] + zs[a] * zs[b]
        distances.append(2.0 * (1.0 - cos_angle))
    return distances
def calc_denom(tris, nodes):
    """Denominator 2*|e1 x e2|**2 of the circumcenter barycentric weights.

    e1 and e2 are the two triangle edges emanating from vertex 0.
    Returns a list with one value per triangle.
    """
    edge1 = [nodes[a, :] - nodes[b, :] for a, b in zip(tris[:, 0], tris[:, 1])]
    edge2 = [nodes[a, :] - nodes[c, :] for a, c in zip(tris[:, 0], tris[:, 2])]
    crs = np.cross(edge1, edge2)
    # squared norm of each cross product, doubled
    return [2.0 * np.sum(sq) for sq in np.array(crs * crs)]
def calc_alpha(tris, nodes, rd2):
    """Unnormalized barycentric circumcenter weight for vertex 0.

    Dots the two triangle edges meeting at vertex 0 and scales by the
    squared length of the opposite edge (rd2[1], the (1,2) edge).
    """
    edge_dots = []
    for p0, p1, p2 in zip(tris[:, 0], tris[:, 1], tris[:, 2]):
        # dot product of the two edges emanating from vertex 0
        edge_dots.append(np.dot(nodes[p0, :] - nodes[p1, :], nodes[p0, :] - nodes[p2, :]))
    return rd2[1] * edge_dots
def calc_beta(tris, nodes, rd2):
    """Unnormalized barycentric circumcenter weight for vertex 1.

    Dots the two triangle edges meeting at vertex 1 and scales by the
    squared length of the opposite edge (rd2[2], the (0,2) edge).
    """
    edge_dots = []
    for p0, p1, p2 in zip(tris[:, 0], tris[:, 1], tris[:, 2]):
        # dot product of the two edges emanating from vertex 1
        edge_dots.append(np.dot(nodes[p1, :] - nodes[p0, :], nodes[p1, :] - nodes[p2, :]))
    return rd2[2] * edge_dots
def calc_gamma(tris, nodes, rd2):
    """Unnormalized barycentric circumcenter weight for vertex 2.

    Dots the two triangle edges meeting at vertex 2 and scales by the
    squared length of the opposite edge (rd2[0], the (0,1) edge).
    """
    edge_dots = []
    for p0, p1, p2 in zip(tris[:, 0], tris[:, 1], tris[:, 2]):
        # dot product of the two edges emanating from vertex 2
        edge_dots.append(np.dot(nodes[p2, :] - nodes[p0, :], nodes[p2, :] - nodes[p1, :]))
    return rd2[0] * edge_dots
def calc_xc(alpha, beta, gamma, tris, nodes):
    """Combine per-triangle barycentric weights into one center per triangle.

    Returns an (ntri, 3) array: alpha*v0 + beta*v1 + gamma*v2 for each
    triangle's vertices v0, v1, v2.
    """
    centers = []
    for a, b, g, p0, p1, p2 in zip(alpha, beta, gamma,
                                   tris[:, 0], tris[:, 1], tris[:, 2]):
        # weighted sum of the three vertex positions
        center = a * nodes[p0, :] + b * nodes[p1, :] + g * nodes[p2, :]
        centers.append(center)
    return np.array(centers)
def circumcenterSphTri(tris, nodes):
    """Circumcenters of spherical triangles, projected onto the unit sphere.

    Args:
        tris: (ntri, 3) integer array of vertex indices into ``nodes``.
        nodes: (nnode, 3) array of unit vectors on the sphere.

    Returns:
        (ntri, 3) array of unit-length circumcenter vectors.
    """
    xs, ys, zs = nodes[:, 0], nodes[:, 1], nodes[:, 2]
    # squared chord lengths of the three triangle edges: (0,1), (1,2), (0,2)
    rd2 = np.array([
        calc_verticesDistance(xs, ys, zs, tris, idx0=0, idx1=1),
        calc_verticesDistance(xs, ys, zs, tris, idx0=1, idx1=2),
        calc_verticesDistance(xs, ys, zs, tris, idx0=0, idx1=2),
    ])
    # normalized barycentric weights of the planar circumcenter
    denom = calc_denom(tris, nodes)
    alpha = calc_alpha(tris, nodes, rd2) / denom
    beta = calc_beta(tris, nodes, rd2) / denom
    gamma = calc_gamma(tris, nodes, rd2) / denom
    xc = calc_xc(alpha, beta, gamma, tris, nodes)
    # project every center onto the sphere by dividing by its Euclidean norm
    norms = np.array([np.sqrt(np.sum(row * row)) for row in xc])
    norms = np.repeat(norms, len(xc[0]), axis=0).reshape(len(xc), len(xc[0]))
    return xc / norms
def test_circumcenterSphTri_171123():
    """Smoke test: circumcenters of an icosahedron's faces vs. known values.

    NOTE(review): this only prints the computed and expected arrays for
    visual comparison — it never asserts.  Consider asserting
    np.allclose(res, ans) once the row ordering is confirmed to match.
    """
    # expected output, originally produced via [x0, tri0] = getIcosNodes(4,1)
    ans = [[-0.57735027, 0.57735027, 0.57735027],
           [-0.35682209, 0.93417236, 0.],
           [0.57735027, -0.57735027, 0.57735027],
           [-0.93417236, 0., 0.35682209],
           [-0.93417236, 0., -0.35682209],
           [-0.57735027, -0.57735027, -0.57735027],
           [-0.57735027, 0.57735027, -0.57735027],
           [0., 0.35682209, 0.93417236],
           [0., -0.35682209, 0.93417236],
           [0.35682209, 0.93417236, 0.],
           [0.57735027, 0.57735027, 0.57735027],
           [0.93417236, 0., 0.35682209],
           [-0.35682209, -0.93417236, 0.],
           [-0.57735027, -0.57735027, 0.57735027],
           [0., 0.35682209, -0.93417236],
           [0., -0.35682209, -0.93417236],
           [0.57735027, -0.57735027, -0.57735027],
           [0.35682209, -0.93417236, 0.],
           [0.93417236, 0., -0.35682209],
           [0.57735027, 0.57735027, -0.57735027]]
    ans = np.array(ans)
    # icosahedron faces, 1-based vertex indices (converted to 0-based below)
    tris = [
        [10, 6, 1],
        [3, 10, 1],
        [11, 5, 2],
        [12, 6, 10],
        [8, 12, 10],
        [4, 12, 8],
        [8, 10, 3],
        [5, 1, 6],
        [5, 6, 2],
        [3, 1, 9],
        [5, 9, 1],
        [11, 9, 5],
        [12, 4, 2],
        [6, 12, 2],
        [8, 3, 7],
        [8, 7, 4],
        [11, 4, 7],
        [11, 2, 4],
        [7, 9, 11],
        [3, 9, 7],
        ]
    # the 12 unit-length icosahedron vertices
    nodes = [
        [0.0000000e+00, 8.5065081e-01, 5.2573111e-01],
        [0.0000000e+00, -8.5065081e-01, 5.2573111e-01],
        [0.0000000e+00, 8.5065081e-01, -5.2573111e-01],
        [0.0000000e+00, -8.5065081e-01, -5.2573111e-01],
        [5.2573111e-01, 0.0000000e+00, 8.5065081e-01],
        [-5.2573111e-01, 0.0000000e+00, 8.5065081e-01],
        [5.2573111e-01, 0.0000000e+00, -8.5065081e-01],
        [-5.2573111e-01, 0.0000000e+00, -8.5065081e-01],
        [8.5065081e-01, 5.2573111e-01, 0.0000000e+00],
        [-8.5065081e-01, 5.2573111e-01, 0.0000000e+00],
        [8.5065081e-01, -5.2573111e-01, 0.0000000e+00],
        [-8.5065081e-01, -5.2573111e-01, 0.0000000e+00],
        ]
    tris = np.array(tris)
    tris = tris - 1  # from indexing [1..] to [0..]
    nodes = np.array(nodes)
    res = circumcenterSphTri(tris, nodes)
    # print both arrays side by side for manual inspection
    print("---res")
    print(res)
    print("---answer")
    print(ans)
if __name__ == '__main__':
    # Run the printed smoke test when this module is executed directly.
    test_circumcenterSphTri_171123()
| [
"numpy.sum",
"numpy.cross",
"numpy.array",
"numpy.tile",
"numpy.dot"
] | [((503, 516), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (511, 516), True, 'import numpy as np\n'), ((1087, 1105), 'numpy.cross', 'np.cross', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (1095, 1105), True, 'import numpy as np\n'), ((1156, 1175), 'numpy.array', 'np.array', (['(crs * crs)'], {}), '(crs * crs)\n', (1164, 1175), True, 'import numpy as np\n'), ((2481, 2494), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2489, 2494), True, 'import numpy as np\n'), ((2828, 2841), 'numpy.array', 'np.array', (['rd2'], {}), '(rd2)\n', (2836, 2841), True, 'import numpy as np\n'), ((3248, 3262), 'numpy.array', 'np.array', (['x_l2'], {}), '(x_l2)\n', (3256, 3262), True, 'import numpy as np\n'), ((4442, 4455), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (4450, 4455), True, 'import numpy as np\n'), ((5717, 5731), 'numpy.array', 'np.array', (['tris'], {}), '(tris)\n', (5725, 5731), True, 'import numpy as np\n'), ((5796, 5811), 'numpy.array', 'np.array', (['nodes'], {}), '(nodes)\n', (5804, 5811), True, 'import numpy as np\n'), ((1189, 1201), 'numpy.sum', 'np.sum', (['elem'], {}), '(elem)\n', (1195, 1201), True, 'import numpy as np\n'), ((392, 406), 'numpy.dot', 'np.dot', (['x1', 'x2'], {}), '(x1, x2)\n', (398, 406), True, 'import numpy as np\n'), ((466, 490), 'numpy.dot', 'np.dot', (['x1[idx]', 'x2[idx]'], {}), '(x1[idx], x2[idx])\n', (472, 490), True, 'import numpy as np\n'), ((2305, 2326), 'numpy.tile', 'np.tile', (['aalp', '(1, 3)'], {}), '(aalp, (1, 3))\n', (2312, 2326), True, 'import numpy as np\n'), ((2358, 2379), 'numpy.tile', 'np.tile', (['abet', '(1, 3)'], {}), '(abet, (1, 3))\n', (2365, 2379), True, 'import numpy as np\n'), ((2411, 2432), 'numpy.tile', 'np.tile', (['agam', '(1, 3)'], {}), '(agam, (1, 3))\n', (2418, 2432), True, 'import numpy as np\n'), ((3217, 3236), 'numpy.sum', 'np.sum', (['(elem * elem)'], {}), '(elem * elem)\n', (3223, 3236), True, 'import numpy as np\n')] |
from kivy.properties import ListProperty, ObjectProperty, StringProperty, \
NumericProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.graphics import Color, Line
from kivy.app import App
import numpy as np
import sys
from random import random
import pandas as pd
from kivy.uix.widget import Widget
Builder.load_string('''
#: import platform sys.platform
<LegendLabel>:
orientation: 'horizontal'
Label:
size_hint_x: 0.25
canvas.before:
Color:
hsv: root.hsv + [1]
Line:
width: 1.5
points: [self.x, self.y + self.size[1]/2, self.x + self.size[0], self.y + self.size[1]/2]
Label:
text: root.text
text_size: self.size
font_size: sp(12)
halign: 'center'
valign: 'middle'
<PlotArea>:
<TsPlot>:
orientation: 'vertical'
BoxLayout:
orientation: 'horizontal'
BoxLayout:
orientation: 'vertical'
PlotArea:
id: plot
size_hint_y: 8.0
on_size: self.draw_axes()
BoxLayout:
id: x_ticks
orientation: 'horizontal'
Label:
id: index_label
size_hint_y: None
halign: 'center'
valign: 'top'
font_size: 12 if platform == 'linux' else 24
size: self.texture_size
text: 'index'
BoxLayout:
id: legend
size_hint_x: 0.15
orientation: 'vertical'
''')
class PlotArea(Widget):
    """ The graph area of the time series plot """
    # pixel margin between the widget border and the drawn axes: [x, y]
    offset = [50, 20]
    # [[x_min, y_min], [x_max, y_max]] of the drawable region, window coords
    bounding_box = ListProperty()
    def make_bounding_box(self):
        """Compute the drawable region, inset from the widget by ``offset``."""
        return [[self.x + self.offset[0],
                 self.y + self.offset[1]],
                [self.x + self.size[0] - self.offset[0],
                 self.y + self.size[1] - self.offset[1]]]
    def draw_axes(self):
        """Clear the canvas and draw the left and bottom axis lines."""
        self.canvas.clear()
        self.bounding_box = self.make_bounding_box()
        bb = self.bounding_box
        with self.canvas:
            # light grey; thicker line off-linux for hi-dpi displays
            Color(.9, .9, .9, 1.)
            # polyline: top-left -> bottom-left -> bottom-right
            points = [bb[0][0], bb[1][1], bb[0][0], bb[0][1], bb[1][0], bb[0][1]]
            Line(width=1 if sys.platform == 'linux' else 1.5, points=points)
    def draw_x_ticks(self, num_ticks):
        """Draw ``num_ticks`` + 1 evenly spaced tick marks below the x axis."""
        x_length = self.bounding_box[1][0] - self.bounding_box[0][0]
        for i in range(num_ticks + 1):
            i_len = x_length * i / num_ticks
            # tick runs from the axis down by half the y offset
            top = [self.bounding_box[0][0] + i_len, self.bounding_box[0][1]]
            bottom = [top[0], top[1] - self.offset[1] / 2]
            with self.canvas:
                Color(.9, .9, .9, 1.)
                Line(width=1 if sys.platform == 'linux' else 1.5, points=bottom+top)
    def get_x(self, num_points):
        """Return the x pixel coordinate of each of ``num_points`` samples."""
        x_scale = (self.bounding_box[1][0] - self.bounding_box[0][0]) \
            / (num_points - 1)
        x_trafo = x_scale * np.array(range(num_points)) + self.bounding_box[0][0]
        return x_trafo
    def transform_y(self, y):
        """Scale data values ``y`` into pixel coordinates of the plot area."""
        y_scale = (self.bounding_box[1][1] - self.bounding_box[0][1]) \
            / (y.max() - y.min())
        y_trafo = y_scale * (y - y.min()) + self.y + self.offset[1]
        return y_trafo
    def add_line(self, y_points, len, hsv):
        """Draw one data series as a polyline of the given hsv color.

        NOTE(review): the parameter name ``len`` shadows the builtin; kept
        unchanged for interface compatibility.
        """
        x_transformed = self.get_x(len)
        y_transformed = self.transform_y(y_points)
        xy_points = list()
        for x, y, in zip(x_transformed, y_transformed):
            # decimate: skip points less than one pixel right of the previous
            if not xy_points or x > xy_points[-2] + 1:
                xy_points += [x, y]
        with self.canvas:
            Color(*hsv, mode='hsv')
            Line(points=xy_points, width=1 if sys.platform == 'linux' else 1.5)
class TsPlot(BoxLayout):
    """ Time series plot. Can be integrated as a widget into another kivy
    app."""
    # number of samples in the currently plotted series; 0 = nothing plotted
    len = 0
    # number of intervals on the x axis
    x_ticks = 10
    # DataFrame to plot; force_dispatch=True makes every assignment fire
    # on_df, even when the same object is re-assigned
    df = ObjectProperty(force_dispatch=True, allownone=True)
    def draw_axes(self):
        """Reset the plot: clear tick labels, the legend, and the axes."""
        self.ids.x_ticks.clear_widgets()
        self.ids.legend.clear_widgets()
        self.len = 0
        self.ids.plot.draw_axes()
    def draw_x_ticks(self):
        """Draw tick marks and their index labels along the x axis."""
        if self.len == 0:
            return
        self.ids.plot.draw_x_ticks(self.x_ticks)
        for i in range(self.x_ticks + 1):
            tick_label = Label(text=str(int(i * self.len / self.x_ticks)),
                               font_size=12 if sys.platform == 'linux' else 24)
            self.ids.x_ticks.add_widget(tick_label)
    def add_line(self, y_points, idx):
        """Plot column ``idx`` of ``self.df`` and add its legend entry."""
        if self.len == 0:
            # the first series defines the x extent and triggers the ticks
            self.len = len(y_points)
            self.draw_x_ticks()
        # spread hues evenly across the columns; fixed saturation and value
        hsv = idx / len(self.df.columns), 0.7, 0.8
        self.ids.plot.add_line(y_points, self.len, hsv)
        l = LegendLabel(text=self.df.columns[idx], hsv=hsv)
        self.ids.legend.add_widget(l)
    def on_df(self, e=None, z=None):
        """Kivy property observer: redraw everything when ``df`` changes."""
        self.draw_axes()
        if self.df is not None:
            self.ids.index_label.text = self.df.index.name or 'index'
        else:
            return
        self.len = 0
        for i, col in enumerate(self.df.columns):
            y = self.df[col]
            self.add_line(y, i)
    def set_df(self, e):
        """Button callback: assign a random DataFrame (1-20 columns)."""
        n = int(random() * 20) + 1
        cols = ['very very long line ' + str(i) for i in range(n)]
        df = pd.DataFrame(np.random.randn(20, n), columns=cols)
        df.index.name = 'My Index'
        self.df = df
class LegendLabel(BoxLayout):
    """Legend row: a short coloured line sample next to a series name.

    Kivy properties (consumed by the kv rule):
        hsv: colour of the sample line, as an HSV triple.
        text: the series (column) name to display.
    """
    hsv = ListProperty()
    text = StringProperty()
class GraphApp(App):
    """Demo application: a TsPlot plus a button that feeds it random data."""

    def build(self):
        """Assemble and return the root widget tree."""
        root = BoxLayout(orientation='vertical')
        plot = TsPlot()
        root.add_widget(plot)
        button = Button(text='Create random series', size_hint_y=0.1)
        button.bind(on_press=plot.set_df)
        root.add_widget(button)
        return root


if __name__ == '__main__':
    GraphApp().run()
| [
"kivy.properties.ListProperty",
"kivy.graphics.Line",
"kivy.lang.Builder.load_string",
"numpy.random.randn",
"kivy.properties.StringProperty",
"kivy.uix.button.Button",
"kivy.uix.boxlayout.BoxLayout",
"random.random",
"kivy.graphics.Color",
"kivy.properties.ObjectProperty"
] | [((411, 1693), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['"""\n#: import platform sys.platform\n<LegendLabel>:\n orientation: \'horizontal\'\n Label:\n size_hint_x: 0.25\n canvas.before:\n Color:\n hsv: root.hsv + [1]\n Line:\n width: 1.5\n points: [self.x, self.y + self.size[1]/2, self.x + self.size[0], self.y + self.size[1]/2]\n Label:\n text: root.text\n text_size: self.size\n font_size: sp(12)\n halign: \'center\'\n valign: \'middle\'\n\n<PlotArea>:\n\n\n<TsPlot>:\n orientation: \'vertical\'\n BoxLayout:\n orientation: \'horizontal\'\n BoxLayout:\n orientation: \'vertical\'\n PlotArea: \n id: plot\n size_hint_y: 8.0\n on_size: self.draw_axes()\n BoxLayout:\n id: x_ticks\n orientation: \'horizontal\'\n Label:\n id: index_label\n size_hint_y: None\n halign: \'center\'\n valign: \'top\'\n font_size: 12 if platform == \'linux\' else 24\n size: self.texture_size\n text: \'index\'\n BoxLayout:\n id: legend\n size_hint_x: 0.15\n orientation: \'vertical\' \n"""'], {}), '(\n """\n#: import platform sys.platform\n<LegendLabel>:\n orientation: \'horizontal\'\n Label:\n size_hint_x: 0.25\n canvas.before:\n Color:\n hsv: root.hsv + [1]\n Line:\n width: 1.5\n points: [self.x, self.y + self.size[1]/2, self.x + self.size[0], self.y + self.size[1]/2]\n Label:\n text: root.text\n text_size: self.size\n font_size: sp(12)\n halign: \'center\'\n valign: \'middle\'\n\n<PlotArea>:\n\n\n<TsPlot>:\n orientation: \'vertical\'\n BoxLayout:\n orientation: \'horizontal\'\n BoxLayout:\n orientation: \'vertical\'\n PlotArea: \n id: plot\n size_hint_y: 8.0\n on_size: self.draw_axes()\n BoxLayout:\n id: x_ticks\n orientation: \'horizontal\'\n Label:\n id: index_label\n size_hint_y: None\n halign: \'center\'\n valign: \'top\'\n font_size: 12 if platform == \'linux\' else 24\n size: self.texture_size\n text: \'index\'\n BoxLayout:\n id: legend\n size_hint_x: 0.15\n orientation: \'vertical\' \n"""\n )\n', (430, 1693), False, 'from kivy.lang import Builder\n'), ((1802, 1816), 
'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1814, 1816), False, 'from kivy.properties import ListProperty, ObjectProperty, StringProperty, NumericProperty\n'), ((3979, 4030), 'kivy.properties.ObjectProperty', 'ObjectProperty', ([], {'force_dispatch': '(True)', 'allownone': '(True)'}), '(force_dispatch=True, allownone=True)\n', (3993, 4030), False, 'from kivy.properties import ListProperty, ObjectProperty, StringProperty, NumericProperty\n'), ((5525, 5539), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (5537, 5539), False, 'from kivy.properties import ListProperty, ObjectProperty, StringProperty, NumericProperty\n'), ((5551, 5567), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (5565, 5567), False, 'from kivy.properties import ListProperty, ObjectProperty, StringProperty, NumericProperty\n'), ((5677, 5710), 'kivy.uix.boxlayout.BoxLayout', 'BoxLayout', ([], {'orientation': '"""vertical"""'}), "(orientation='vertical')\n", (5686, 5710), False, 'from kivy.uix.boxlayout import BoxLayout\n'), ((5782, 5834), 'kivy.uix.button.Button', 'Button', ([], {'text': '"""Create random series"""', 'size_hint_y': '(0.1)'}), "(text='Create random series', size_hint_y=0.1)\n", (5788, 5834), False, 'from kivy.uix.button import Button\n'), ((2227, 2252), 'kivy.graphics.Color', 'Color', (['(0.9)', '(0.9)', '(0.9)', '(1.0)'], {}), '(0.9, 0.9, 0.9, 1.0)\n', (2232, 2252), False, 'from kivy.graphics import Color, Line\n'), ((2343, 2407), 'kivy.graphics.Line', 'Line', ([], {'width': "(1 if sys.platform == 'linux' else 1.5)", 'points': 'points'}), "(width=1 if sys.platform == 'linux' else 1.5, points=points)\n", (2347, 2407), False, 'from kivy.graphics import Color, Line\n'), ((3720, 3743), 'kivy.graphics.Color', 'Color', (['*hsv'], {'mode': '"""hsv"""'}), "(*hsv, mode='hsv')\n", (3725, 3743), False, 'from kivy.graphics import Color, Line\n'), ((3756, 3823), 'kivy.graphics.Line', 'Line', ([], {'points': 'xy_points', 'width': "(1 
if sys.platform == 'linux' else 1.5)"}), "(points=xy_points, width=1 if sys.platform == 'linux' else 1.5)\n", (3760, 3823), False, 'from kivy.graphics import Color, Line\n'), ((5389, 5411), 'numpy.random.randn', 'np.random.randn', (['(20)', 'n'], {}), '(20, n)\n', (5404, 5411), True, 'import numpy as np\n'), ((2783, 2808), 'kivy.graphics.Color', 'Color', (['(0.9)', '(0.9)', '(0.9)', '(1.0)'], {}), '(0.9, 0.9, 0.9, 1.0)\n', (2788, 2808), False, 'from kivy.graphics import Color, Line\n'), ((2821, 2891), 'kivy.graphics.Line', 'Line', ([], {'width': "(1 if sys.platform == 'linux' else 1.5)", 'points': '(bottom + top)'}), "(width=1 if sys.platform == 'linux' else 1.5, points=bottom + top)\n", (2825, 2891), False, 'from kivy.graphics import Color, Line\n'), ((5277, 5285), 'random.random', 'random', ([], {}), '()\n', (5283, 5285), False, 'from random import random\n')] |
import pymesh
import json
import pathlib
import copy
import math
import itertools
import numpy
from scipy.spatial.transform import Rotation
import context
from fixture_utils import save_fixture, get_fixture_dir_path, get_meshes_dir_path
# Uniform scale applied to every mesh and body position in the scene.
scale = 1
# Rigid-body templates for the three chain parts.  "mesh" paths are resolved
# against the meshes directory; "rotation" is XYZ Euler angles in degrees.
barring = {
    "mesh": "507-movements/227-chain-pully/barring.obj",
    "rotation": [90, 0, 0],
    "scale": scale
}
pin = {
    "mesh": "507-movements/227-chain-pully/pin.obj",
    "rotation": [90, 0, 0],
    "scale": scale
}
link = {
    "mesh": "507-movements/227-chain-pully/link.obj",
    "rotation": [90, 0, 0],
    "scale": scale
}
# Distance from the link plate's centre to each pin hole (mesh units).
link_hole_center = 2.45905
# Pin-to-pin pitch of one chain link.
link_width = 2 * link_hole_center
# Alternating z offsets so consecutive link plates interleave.
link_vertical_offsets = [0.763387, 0.940965]
def circle_points(num_links, angle_offset=0, radius_offset=0, pitch=None):
    """Return ``num_links`` points evenly spaced on a circle in the z=0 plane.

    The radius is chosen so that neighbouring points are exactly ``pitch``
    apart (chord length), letting a closed chain of ``num_links`` links wrap
    around the circle.

    Args:
        num_links: number of points (chain links) on the circle.
        angle_offset: rotation of the whole ring, in radians.
        radius_offset: additive correction to the computed radius.
        pitch: spacing between neighbouring points; defaults to the module
            level ``link_width`` (backward compatible).

    Returns:
        (num_links, 3) numpy array of xyz coordinates.
    """
    if pitch is None:
        pitch = link_width
    angles = numpy.linspace(angle_offset, 2 * numpy.pi + angle_offset,
                           num=num_links, endpoint=False).reshape(-1, 1)
    # a chord of length c on a circle of radius r spans c = 2 r sin(pi / n)
    radius = pitch / (2 * numpy.sin(numpy.pi / num_links)) + radius_offset
    x = radius * numpy.cos(angles)
    y = radius * numpy.sin(angles)
    z = numpy.zeros(angles.shape)
    return numpy.hstack([x, y, z])
def line_points(start, dir, num_links, pitch=None):
    """Return ``num_links + 1`` collinear points starting at ``start``.

    Points advance from ``start`` along ``dir`` in steps of ``pitch``.

    Args:
        start: xyz start coordinate.
        dir: direction vector (any length; normalised internally).
        num_links: number of segments; ``num_links + 1`` points are returned.
        pitch: step length; defaults to the module-level ``link_width``.

    Returns:
        (num_links + 1, 3) numpy float array.
    """
    if pitch is None:
        pitch = link_width
    # Normalise into a fresh array: the original ``dir /= norm`` mutated the
    # caller's vector in place.
    unit = numpy.asarray(dir, dtype=float) / numpy.linalg.norm(dir)
    steps = numpy.arange(num_links + 1).reshape(-1, 1)
    return numpy.asarray(start, dtype=float) + pitch * steps * unit
def export_polyline(points, offset=0, loops=True):
    """Print a Wavefront-OBJ style polyline (``v`` and ``l`` records) to stdout.

    Args:
        points: sequence of xyz coordinates.
        offset: index offset added to every vertex reference.
        loops: when True, emit a closing segment from last to first vertex.
    """
    for coord in points:
        print("v {:g} {:g} {:g}".format(*coord))
    n = len(points)
    for seg in range(1, n):
        print(f"l {seg + offset:d} {seg + 1 + offset:d}")
    if loops:
        print(f"l {n + offset:d} {1 + offset:d}")
def polyline_to_chain(points):
    """Convert a closed polyline into rigid bodies forming a roller chain.

    Assumes consecutive points are exactly one link pitch (``link_width``)
    apart.  For every segment four bodies are emitted: two mirrored link
    plates (above/below the pin plane), one barring and one pin.

    Args:
        points: (N, 3) numpy array of pin positions; N must be even so the
            alternating inner/outer plate pattern closes around the loop.

    Returns:
        List of rigid-body dicts for the fixture's "rigid_bodies" section.
    """
    assert((points[0] != points[-1]).any()) # polyline must not repeat its first point
    chain = []
    num_points = points.shape[0]
    assert(num_points % 2 == 0)
    for i in range(num_points):
        pi0 = points[i]
        pi1 = points[(i + 1) % num_points]
        # Link plate centred between the two pins, rotated in-plane to align
        # with the segment; z alternates so consecutive plates interleave.
        chain.append(copy.deepcopy(link))
        chain[-1]["position"] = (scale * (pi1 + pi0) / 2).tolist()
        chain[-1]["position"][2] = scale * link_vertical_offsets[i % 2]
        chain[-1]["rotation"][2] = numpy.arctan2(
            *(pi1 - pi0)[:2][::-1]) * 180 / numpy.pi
        # Mirror copy of the plate on the other side of the pin plane.
        chain.append(copy.deepcopy(chain[-1]))
        chain[-1]["position"][2] *= -1
        chain.append(copy.deepcopy(barring))
        chain[-1]["position"] = (scale * pi0).tolist()
        chain.append(copy.deepcopy(pin))
        chain[-1]["position"] = (scale * pi1).tolist()
    return chain
def generate_sprocket(num_links):
    """Build a ``num_links``-tooth sprocket mesh and save it as an OBJ file.

    A spike (tooth) mesh is rotated into place between each pair of chain
    link positions, unioned onto the convex hull of the sprocket body, and
    the result is written to the meshes directory.

    Args:
        num_links: number of teeth (one per chain link on the circle).
    """
    angles = numpy.linspace(0, 2 * numpy.pi, num=num_links,
                           endpoint=False).reshape(-1, 1)
    # Place each tooth half-way between two consecutive link positions.
    angle_offset = (angles[0] + angles[1]) / 2
    points = circle_points(num_links, angle_offset, radius_offset=-1.5)
    spike = pymesh.load_mesh(str(get_meshes_dir_path() /
                                 "507-movements/227-chain-pully/spike.obj"))
    spikes = []
    for i in range(num_links):
        R = Rotation.from_euler(
            'xyz', [90, 0, (i + 0.5) * 360 / num_links], degrees=True)
        R = R.as_matrix()
        spikes.append(
            pymesh.form_mesh(spike.vertices @ R.T + points[i], spike.faces)
        )
    # Sprocket body: convex hull of two rings at the spike's min/max height.
    points = circle_points(num_links, 0, radius_offset=-1)
    x = points[:, 0].reshape(-1, 1)
    y = points[:, 1].reshape(-1, 1)
    points = numpy.vstack([numpy.hstack([x, y, numpy.full(angles.shape, spike.vertices[:, 1].min())]),
                          numpy.hstack([x, y, numpy.full(angles.shape, spike.vertices[:, 1].max())])])
    sprocket = pymesh.convex_hull(
        pymesh.form_mesh(points, numpy.empty((0, 3))))
    print("Union of spikes")
    for spike in spikes:
        sprocket = pymesh.boolean(sprocket, spike, operation="union")
    print("Done")
    pymesh.save_mesh(str(get_meshes_dir_path() /
                         f"507-movements/227-chain-pully/sprocket-{num_links}teeth.obj"),
                     sprocket)
def main():
    """Build and save the chain-and-sprockets fixture scene.

    Lays out a closed chain around two half-circles (20- and 8-link
    sprockets) connected by two straight 10-link runs, converts the
    polyline into rigid bodies, appends the two sprocket meshes and writes
    the fixture JSON.
    """
    scene = {
        "scene_type": "distance_barrier_rb_problem",
        "solver": "ipc_solver",
        "timestep": 0.01,
        "max_time": 5.0,
        "distance_barrier_constraint": {
            "initial_barrier_activation_distance": 1e-3 * scale
        },
        "rigid_body_problem": {
            "gravity": [0, -9.81, 0],
            "rigid_bodies": []
        }
    }
    num_links_1 = 20
    num_links_2 = 8
    # Upper half of the large sprocket's link circle.
    cpoints1 = circle_points(num_links_1)
    cpoints1 = cpoints1[:cpoints1.shape[0] // 2 + 1]
    # Mirrored half-circle of the small sprocket, shifted below the first.
    cpoints2 = circle_points(num_links_2)
    cpoints2 = cpoints2[:cpoints2.shape[0] // 2 + 1]
    cpoints2[:, 1] *= -1
    dx = cpoints2[-1, 0] - cpoints1[-1, 0]
    dlen = 10 * link_width
    # Vertical gap chosen so each straight run is exactly 10 links long.
    dy = numpy.sqrt(dlen**2 - dx**2)
    cpoints2[:, 1] -= dy
    dir1 = numpy.array([dx, -dy, 0])
    lpoints1 = line_points(cpoints1[-1], dir1, 10)
    dir2 = dir1.copy()
    dir2[1] *= -1
    lpoints2 = line_points(cpoints2[0], dir2, 10)
    # Closed loop: big half-circle, run down, small half-circle, run up.
    points = numpy.vstack(
        [cpoints1[:], # circle
         lpoints1[1:-1], # line down
         cpoints2[::-1],
         lpoints2[1:-1] # line up
         ])
    # Rotate the whole loop 90 degrees about z.
    R = numpy.array([[0, 1, 0],
                 [-1, 0, 0],
                 [0, 0, 1]])
    points = points @ R.T
    # export_polyline(points, offset=0, loops=True)
    scene["rigid_body_problem"]["rigid_bodies"] = polyline_to_chain(points)
    bodies = scene["rigid_body_problem"]["rigid_bodies"]
    # generate_sprocket(num_links_1)
    # Driving sprocket: kinematic, spins about z at a fixed rate.
    bodies.append({
        "mesh": "507-movements/227-chain-pully/sprocket-20teeth.obj",
        "angular_velocity": [0, 0, 100],
        "scale": scale,
        "type": "kinematic",
        "is_dof_fixed": ([True] * 5 + [False])
    })
    # generate_sprocket(num_links_2)
    # Driven sprocket: dynamic, free only in rotation about z.
    bodies.append({
        "mesh": "507-movements/227-chain-pully/sprocket-8teeth.obj",
        "scale": scale,
        "type": "dynamic",
        "is_dof_fixed": ([True] * 5 + [False])
    })
    save_fixture(scene, get_fixture_dir_path() / "3D" /
                 "mechanisms/507-movements" / "227-chain-pully-scaled-up.json")


if __name__ == "__main__":
    main()
| [
"copy.deepcopy",
"fixture_utils.get_meshes_dir_path",
"pymesh.form_mesh",
"numpy.arctan2",
"scipy.spatial.transform.Rotation.from_euler",
"fixture_utils.get_fixture_dir_path",
"numpy.empty",
"numpy.zeros",
"numpy.hstack",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"numpy... | [((1068, 1093), 'numpy.zeros', 'numpy.zeros', (['angles.shape'], {}), '(angles.shape)\n', (1079, 1093), False, 'import numpy\n'), ((1105, 1128), 'numpy.hstack', 'numpy.hstack', (['[x, y, z]'], {}), '([x, y, z])\n', (1117, 1128), False, 'import numpy\n'), ((1184, 1215), 'numpy.empty', 'numpy.empty', (['(num_links + 1, 3)'], {}), '((num_links + 1, 3))\n', (1195, 1215), False, 'import numpy\n'), ((1249, 1271), 'numpy.linalg.norm', 'numpy.linalg.norm', (['dir'], {}), '(dir)\n', (1266, 1271), False, 'import numpy\n'), ((4746, 4777), 'numpy.sqrt', 'numpy.sqrt', (['(dlen ** 2 - dx ** 2)'], {}), '(dlen ** 2 - dx ** 2)\n', (4756, 4777), False, 'import numpy\n'), ((4811, 4836), 'numpy.array', 'numpy.array', (['[dx, -dy, 0]'], {}), '([dx, -dy, 0])\n', (4822, 4836), False, 'import numpy\n'), ((4993, 5068), 'numpy.vstack', 'numpy.vstack', (['[cpoints1[:], lpoints1[1:-1], cpoints2[::-1], lpoints2[1:-1]]'], {}), '([cpoints1[:], lpoints1[1:-1], cpoints2[::-1], lpoints2[1:-1]])\n', (5005, 5068), False, 'import numpy\n'), ((5158, 5205), 'numpy.array', 'numpy.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 1]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n', (5169, 5205), False, 'import numpy\n'), ((1007, 1024), 'numpy.cos', 'numpy.cos', (['angles'], {}), '(angles)\n', (1016, 1024), False, 'import numpy\n'), ((1042, 1059), 'numpy.sin', 'numpy.sin', (['angles'], {}), '(angles)\n', (1051, 1059), False, 'import numpy\n'), ((3040, 3118), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[90, 0, (i + 0.5) * 360 / num_links]'], {'degrees': '(True)'}), "('xyz', [90, 0, (i + 0.5) * 360 / num_links], degrees=True)\n", (3059, 3118), False, 'from scipy.spatial.transform import Rotation\n'), ((3771, 3821), 'pymesh.boolean', 'pymesh.boolean', (['sprocket', 'spike'], {'operation': '"""union"""'}), "(sprocket, spike, operation='union')\n", (3785, 3821), False, 'import pymesh\n'), ((778, 870), 'numpy.linspace', 'numpy.linspace', (['angle_offset', '(2 * 
numpy.pi + angle_offset)'], {'num': 'num_links', 'endpoint': '(False)'}), '(angle_offset, 2 * numpy.pi + angle_offset, num=num_links,\n endpoint=False)\n', (792, 870), False, 'import numpy\n'), ((2008, 2027), 'copy.deepcopy', 'copy.deepcopy', (['link'], {}), '(link)\n', (2021, 2027), False, 'import copy\n'), ((2293, 2317), 'copy.deepcopy', 'copy.deepcopy', (['chain[-1]'], {}), '(chain[-1])\n', (2306, 2317), False, 'import copy\n'), ((2380, 2402), 'copy.deepcopy', 'copy.deepcopy', (['barring'], {}), '(barring)\n', (2393, 2402), False, 'import copy\n'), ((2480, 2498), 'copy.deepcopy', 'copy.deepcopy', (['pin'], {}), '(pin)\n', (2493, 2498), False, 'import copy\n'), ((2621, 2683), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2 * numpy.pi)'], {'num': 'num_links', 'endpoint': '(False)'}), '(0, 2 * numpy.pi, num=num_links, endpoint=False)\n', (2635, 2683), False, 'import numpy\n'), ((3193, 3256), 'pymesh.form_mesh', 'pymesh.form_mesh', (['(spike.vertices @ R.T + points[i])', 'spike.faces'], {}), '(spike.vertices @ R.T + points[i], spike.faces)\n', (3209, 3256), False, 'import pymesh\n'), ((3675, 3694), 'numpy.empty', 'numpy.empty', (['(0, 3)'], {}), '((0, 3))\n', (3686, 3694), False, 'import numpy\n'), ((941, 972), 'numpy.sin', 'numpy.sin', (['(numpy.pi / num_links)'], {}), '(numpy.pi / num_links)\n', (950, 972), False, 'import numpy\n'), ((2203, 2240), 'numpy.arctan2', 'numpy.arctan2', (['*(pi1 - pi0)[:2][::-1]'], {}), '(*(pi1 - pi0)[:2][::-1])\n', (2216, 2240), False, 'import numpy\n'), ((2880, 2901), 'fixture_utils.get_meshes_dir_path', 'get_meshes_dir_path', ([], {}), '()\n', (2899, 2901), False, 'from fixture_utils import save_fixture, get_fixture_dir_path, get_meshes_dir_path\n'), ((3866, 3887), 'fixture_utils.get_meshes_dir_path', 'get_meshes_dir_path', ([], {}), '()\n', (3885, 3887), False, 'from fixture_utils import save_fixture, get_fixture_dir_path, get_meshes_dir_path\n'), ((5994, 6016), 'fixture_utils.get_fixture_dir_path', 'get_fixture_dir_path', ([], 
{}), '()\n', (6014, 6016), False, 'from fixture_utils import save_fixture, get_fixture_dir_path, get_meshes_dir_path\n')] |
import numpy as np
import pandas as pd
from neural_network import Neural_Network
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from visualization import plot_decision_boundary
def read_data(x_file, y_file):
    """Read features and integer labels from two parallel text files.

    Each line of ``x_file`` is a comma-separated feature vector; the
    matching line of ``y_file`` holds the integer class label.

    Returns:
        (data, labels): list of float lists and list of ints.
    """
    data, labels = [], []
    with open(x_file, "r") as xf, open(y_file, "r") as yf:
        for x_line, y_line in zip(xf, yf):
            features = [float(tok.strip()) for tok in x_line.strip().split(",")]
            data.append(features)
            labels.append(int(y_line.strip()))
    return data, labels
def read_mnist(file):
    """Load an MNIST-style CSV of 784 pixel columns (+ optional label column).

    If a 785th column is present it is taken as the digit label; otherwise
    the labels default to zeros.  Digits are relabelled for the binary
    6-vs-8 task: 6 -> 0, 8 -> 1.

    Args:
        file: path to a header-less CSV file.

    Returns:
        (x, y): pixel matrix as a numpy array and 1-D label array.
    """
    x = pd.read_csv(file, header=None)
    if x.shape[1] > 784:
        # the last column holds the digit label
        y = np.array(x[[784]]).flatten()
        x = x.drop(columns=[784])
    else:
        y = np.zeros(x.shape[0])
    # DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy().
    x = x.to_numpy()
    y[y == 6] = 0
    y[y == 8] = 1
    return x, y
def b_1(plot=False):
    """Baseline experiment: fit sklearn LogisticRegression on the toy data,
    report train/test accuracy and optionally plot the decision boundaries.

    Args:
        plot: when True, render decision-boundary plots for both splits.
    """
    print("\nLogistic Regression")
    model = LogisticRegression()
    model.fit(train_data, train_labels)
    train_acc = 100 * accuracy_score(train_labels, model.predict(train_data))
    print("Train Set Accuracy: ", train_acc)
    test_acc = 100 * accuracy_score(test_labels, model.predict(test_data))
    print("Test Set Accuracy: ", test_acc)
    if not plot:
        return
    plot_decision_boundary(model.predict, np.array(train_data), np.array(train_labels), "LogisticRegression Train Set\n Accuracy: %f" % (train_acc))
    plot_decision_boundary(model.predict, np.array(test_data), np.array(test_labels), "LogisticRegression Test Set\n Accuracy: %f" % (test_acc))
def b_2(plot=False, units=None, eeta=0.1, threshold=1e-6):
    """Train the custom Neural_Network on the toy data and report accuracy.

    Args:
        plot: when True, plot train/test decision boundaries.
        units: hidden-layer sizes; defaults to a single 5-unit layer.
        eeta: learning rate.
        threshold: convergence threshold passed to training.
    """
    # Avoid the shared mutable-default pitfall: build the default per call.
    if units is None:
        units = [5]
    print("\nNeural_Network")
    model = Neural_Network(len(train_data[0]), units, activation="sigmoid")
    print(model)
    model.train(train_data, train_labels, max_iter=5000, eeta=eeta, batch_size=len(train_data), threshold=threshold, decay=False)
    pred = model.predict(train_data)
    train_acc = accuracy_score(train_labels, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(test_data)
    test_acc = accuracy_score(test_labels, pred) * 100
    print("Test Set Accuracy: ", test_acc)
    if plot:
        plot_decision_boundary(model.predict, np.array(train_data), np.array(train_labels), "Neural_Network Train Set\n Units in Hidden layers: %s\nAccuracy: %f" % (str(model.hidden_layer_sizes), train_acc))
        plot_decision_boundary(model.predict, np.array(test_data), np.array(test_labels), "Neural_Network Test Set\n Units in Hidden layers: %s\nAccuracy: %f" % (str(model.hidden_layer_sizes), test_acc))
def b_3(plot=False):
    """Sweep single-hidden-layer widths on the toy data.

    Trains one network per (unit count, learning rate) pair and prints
    train/test accuracy for each; optionally plots the test-set boundary.

    Args:
        plot: when True, plot the test-set decision boundary per model.
    """
    # Layer widths to try, each with its hand-tuned learning rate below.
    units = [1, 2, 3, 10, 20, 40]
    lrs = [0.09, 0.09, 0.1, 0.1, 0.1, 0.01]
    # lrs = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    for unit, lr in zip(units, lrs):
        print("\nNeural_Network")
        model = Neural_Network(len(train_data[0]), [unit], activation="sigmoid")
        print(model)
        model.train(train_data, train_labels, max_iter=10000, eeta=lr, batch_size=len(train_data), threshold=1e-6, decay=False)
        pred = model.predict(train_data)
        train_acc = accuracy_score(train_labels, pred) * 100
        print("Train Set Accuracy: ", train_acc)
        pred = model.predict(test_data)
        test_acc = accuracy_score(test_labels, pred) * 100
        print("Test Set Accuracy: ", test_acc)
        if plot:
            plot_decision_boundary(model.predict, np.array(test_data), np.array(test_labels), "Neural_Network Test Set\n Units in Hidden layers: %s\nAccuracy: %f" % (str(model.hidden_layer_sizes), test_acc))
def c_1(units=None):
    """Train the custom Neural_Network on the MNIST 6-vs-8 data.

    Args:
        units: hidden-layer sizes; defaults to no hidden layers
            (i.e. a logistic-regression-like model).
    """
    # Avoid the shared mutable-default pitfall: build the default per call.
    if units is None:
        units = []
    print("\nNeural_Network MNIST")
    model = Neural_Network(len(mnist_trd[0]), units, activation="sigmoid")
    print(model)
    model.train(mnist_trd, mnist_trl, max_iter=250, eeta=0.001, batch_size=100, decay=True, threshold=1e-3)
    pred = model.predict(mnist_trd)
    train_acc = accuracy_score(mnist_trl, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(mnist_ted)
    test_acc = accuracy_score(mnist_tel, pred) * 100
    print("Test Set Accuracy: ", test_acc)
def c_2(plot=False, units=None, activation="sigmoid", eeta=0.1):
    """Train a hidden-layer Neural_Network on the MNIST 6-vs-8 data.

    Args:
        plot: accepted for call-site compatibility but currently unused.
        units: hidden-layer sizes; defaults to one 100-unit layer.
        activation: activation function name passed to the network.
        eeta: learning rate.
    """
    # Avoid the shared mutable-default pitfall: build the default per call.
    if units is None:
        units = [100]
    print("\nNeural_Network MNIST")
    model = Neural_Network(len(mnist_trd[0]), units, activation=activation)
    print(model)
    model.train(mnist_trd, mnist_trl, max_iter=300, eeta=eeta, batch_size=100, decay=True, threshold=1e-3)
    pred = model.predict(mnist_trd)
    train_acc = accuracy_score(mnist_trl, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(mnist_ted)
    test_acc = accuracy_score(mnist_tel, pred) * 100
    print("Test Set Accuracy: ", test_acc)
# --- Script driver: note these statements run at import time. ---
# 2-D toy dataset used by the b_* experiments.
train_data, train_labels = read_data("dataset/toy_data/toy_trainX.csv", "dataset/toy_data/toy_trainY.csv")
test_data, test_labels = read_data("dataset/toy_data/toy_testX.csv", "dataset/toy_data/toy_testY.csv")
# MNIST 6-vs-8 subset used by the c_* experiments.
mnist_trd, mnist_trl = read_mnist("dataset/mnist_data/MNIST_train.csv")
mnist_ted, mnist_tel = read_mnist("dataset/mnist_data/MNIST_test.csv")
print(mnist_trl)
# Uncomment the experiment(s) to run:
# b_1(plot=True)
# b_2(plot=True)
# b_3(plot=True)
b_2(plot=True, units=[5, 5], eeta=0.1, threshold=1e-10)
# c_1()
# c_2()
# c_2(plot=True, activation="ReLU", eeta=0.01)
| [
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"numpy.zeros",
"sklearn.linear_model.LogisticRegression",
"numpy.array"
] | [((610, 640), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None'}), '(file, header=None)\n', (621, 640), True, 'import pandas as pd\n'), ((938, 958), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (956, 958), False, 'from sklearn.linear_model import LogisticRegression\n'), ((763, 783), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (771, 783), True, 'import numpy as np\n'), ((1052, 1086), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['train_labels', 'pred'], {}), '(train_labels, pred)\n', (1066, 1086), False, 'from sklearn.metrics import accuracy_score\n'), ((1190, 1223), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels', 'pred'], {}), '(test_labels, pred)\n', (1204, 1223), False, 'from sklearn.metrics import accuracy_score\n'), ((1954, 1988), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['train_labels', 'pred'], {}), '(train_labels, pred)\n', (1968, 1988), False, 'from sklearn.metrics import accuracy_score\n'), ((2092, 2125), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels', 'pred'], {}), '(test_labels, pred)\n', (2106, 2125), False, 'from sklearn.metrics import accuracy_score\n'), ((3877, 3908), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['mnist_trl', 'pred'], {}), '(mnist_trl, pred)\n', (3891, 3908), False, 'from sklearn.metrics import accuracy_score\n'), ((4012, 4043), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['mnist_tel', 'pred'], {}), '(mnist_tel, pred)\n', (4026, 4043), False, 'from sklearn.metrics import accuracy_score\n'), ((4449, 4480), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['mnist_trl', 'pred'], {}), '(mnist_trl, pred)\n', (4463, 4480), False, 'from sklearn.metrics import accuracy_score\n'), ((4584, 4615), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['mnist_tel', 'pred'], {}), '(mnist_tel, pred)\n', (4598, 4615), False, 'from sklearn.metrics import accuracy_score\n'), ((1332, 1352), 
'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (1340, 1352), True, 'import numpy as np\n'), ((1354, 1376), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (1362, 1376), True, 'import numpy as np\n'), ((1485, 1504), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (1493, 1504), True, 'import numpy as np\n'), ((1506, 1527), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (1514, 1527), True, 'import numpy as np\n'), ((2234, 2254), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (2242, 2254), True, 'import numpy as np\n'), ((2256, 2278), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (2264, 2278), True, 'import numpy as np\n'), ((2442, 2461), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (2450, 2461), True, 'import numpy as np\n'), ((2463, 2484), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (2471, 2484), True, 'import numpy as np\n'), ((3106, 3140), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['train_labels', 'pred'], {}), '(train_labels, pred)\n', (3120, 3140), False, 'from sklearn.metrics import accuracy_score\n'), ((3256, 3289), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels', 'pred'], {}), '(test_labels, pred)\n', (3270, 3289), False, 'from sklearn.metrics import accuracy_score\n'), ((678, 696), 'numpy.array', 'np.array', (['x[[784]]'], {}), '(x[[784]])\n', (686, 696), True, 'import numpy as np\n'), ((3410, 3429), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (3418, 3429), True, 'import numpy as np\n'), ((3431, 3452), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (3439, 3452), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from pyinfraformat.core.utils import (
custom_float,
custom_int,
info_fi,
is_number,
print_info,
)
@pytest.mark.parametrize(
    "nums",
    [
        (1, True),
        ("1", True),
        ("1.1", True),
        ("1j", True),
        ("1.j", True),
        ("-", True),
        ("a", False),
        ("1a", False),
    ],
)
def test_is_number(nums):
    """is_number accepts int/float/complex literals (and '-') and rejects junk."""
    value, expected = nums
    assert is_number(value) is expected
@pytest.mark.parametrize("language", ["fi", "Fi", "FI", "fI"])
def test_info(language):
    """print_info accepts the 'fi' language code case-insensitively and info_fi returns text."""
    assert print_info() is None
    assert print_info(language=language) is None
    assert isinstance(info_fi(), str)
@pytest.mark.parametrize("language", ["se", "en", "fin"])
def test_info_bad(language):
    """Unsupported language codes raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        print_info(language)
@pytest.mark.parametrize("nums", [("1", 1), ("1_000", 1000), ("0", 0)])
def test_custom_int_pure(nums):
    """Integer-looking strings parse to exact int values."""
    text, expected = nums
    result = custom_int(text)
    assert result == expected
    assert isinstance(result, int)
@pytest.mark.parametrize("nums", [("1.", 1), ("1_000,0", 1000), (".0", 0), ("1,0e3", 1000)])
def test_custom_int_float(nums):
    """Float-formatted strings with integral values are coerced to int."""
    str_num, num = nums
    custom_integer = custom_int(str_num)
    assert custom_integer == num
    assert isinstance(custom_integer, int)
@pytest.mark.parametrize(
    "nums", [(".1", 0.1), ("1.1", 1.1), ("1_000,1", 1000.1), ("1,2345e3", 1234.5)]
)
def test_custom_int_to_float(nums):
    """Strings with non-integral values fall back to a non-NaN float."""
    text, expected = nums
    result = custom_int(text)
    assert result == expected
    assert isinstance(result, float)
    assert not np.isnan(result)
@pytest.mark.parametrize("num", ["-", "nan", "NaN", "NA", "test", "value", "1.2a"])
def test_custom_int_to_nan(num):
    """Unparseable input maps to float('nan') rather than raising."""
    result = custom_int(num)
    assert isinstance(result, float)
    assert np.isnan(result)
@pytest.mark.parametrize("nums", [("1", 1.0), ("1_000,2", 1000.2), ("1e3", 1e3)])
def test_custom_float(nums):
    """Numeric strings (decimal comma, underscores, exponents) parse to float."""
    str_num, num = nums
    custom_floating = custom_float(str_num)
    assert custom_floating == num
    assert isinstance(custom_floating, float)
    assert not np.isnan(custom_floating)
@pytest.mark.parametrize("num", ["-", "nan", "NaN", "NA", "test", "string", "1.2a"])
def test_custom_float_to_nan(num):
    """Unparseable input maps to NaN instead of raising."""
    result = custom_float(num)
    assert isinstance(result, float)
    assert np.isnan(result)
| [
"pyinfraformat.core.utils.is_number",
"pyinfraformat.core.utils.print_info",
"numpy.isnan",
"pytest.raises",
"pyinfraformat.core.utils.custom_int",
"pytest.mark.parametrize",
"pyinfraformat.core.utils.info_fi",
"pyinfraformat.core.utils.custom_float"
] | [((156, 304), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nums"""', "[(1, True), ('1', True), ('1.1', True), ('1j', True), ('1.j', True), ('-', \n True), ('a', False), ('1a', False)]"], {}), "('nums', [(1, True), ('1', True), ('1.1', True), (\n '1j', True), ('1.j', True), ('-', True), ('a', False), ('1a', False)])\n", (179, 304), False, 'import pytest\n'), ((468, 529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""language"""', "['fi', 'Fi', 'FI', 'fI']"], {}), "('language', ['fi', 'Fi', 'FI', 'fI'])\n", (491, 529), False, 'import pytest\n'), ((677, 733), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""language"""', "['se', 'en', 'fin']"], {}), "('language', ['se', 'en', 'fin'])\n", (700, 733), False, 'import pytest\n'), ((840, 910), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nums"""', "[('1', 1), ('1_000', 1000), ('0', 0)]"], {}), "('nums', [('1', 1), ('1_000', 1000), ('0', 0)])\n", (863, 910), False, 'import pytest\n'), ((1130, 1226), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nums"""', "[('1.', 1), ('1_000,0', 1000), ('.0', 0), ('1,0e3', 1000)]"], {}), "('nums', [('1.', 1), ('1_000,0', 1000), ('.0', 0), (\n '1,0e3', 1000)])\n", (1153, 1226), False, 'import pytest\n'), ((1442, 1550), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nums"""', "[('.1', 0.1), ('1.1', 1.1), ('1_000,1', 1000.1), ('1,2345e3', 1234.5)]"], {}), "('nums', [('.1', 0.1), ('1.1', 1.1), ('1_000,1', \n 1000.1), ('1,2345e3', 1234.5)])\n", (1465, 1550), False, 'import pytest\n'), ((1774, 1860), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num"""', "['-', 'nan', 'NaN', 'NA', 'test', 'value', '1.2a']"], {}), "('num', ['-', 'nan', 'NaN', 'NA', 'test', 'value',\n '1.2a'])\n", (1797, 1860), False, 'import pytest\n'), ((2011, 2099), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nums"""', "[('1', 1.0), ('1_000,2', 1000.2), ('1e3', 1000.0)]"], {}), "('nums', [('1', 1.0), ('1_000,2', 
1000.2), ('1e3', \n 1000.0)])\n", (2034, 2099), False, 'import pytest\n'), ((2313, 2400), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num"""', "['-', 'nan', 'NaN', 'NA', 'test', 'string', '1.2a']"], {}), "('num', ['-', 'nan', 'NaN', 'NA', 'test', 'string',\n '1.2a'])\n", (2336, 2400), False, 'import pytest\n'), ((1031, 1050), 'pyinfraformat.core.utils.custom_int', 'custom_int', (['str_num'], {}), '(str_num)\n', (1041, 1050), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((1343, 1362), 'pyinfraformat.core.utils.custom_int', 'custom_int', (['str_num'], {}), '(str_num)\n', (1353, 1362), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((1633, 1652), 'pyinfraformat.core.utils.custom_int', 'custom_int', (['str_num'], {}), '(str_num)\n', (1643, 1652), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((1911, 1926), 'pyinfraformat.core.utils.custom_int', 'custom_int', (['num'], {}), '(num)\n', (1921, 1926), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((1983, 2007), 'numpy.isnan', 'np.isnan', (['custom_integer'], {}), '(custom_integer)\n', (1991, 2007), True, 'import numpy as np\n'), ((2167, 2188), 'pyinfraformat.core.utils.custom_float', 'custom_float', (['str_num'], {}), '(str_num)\n', (2179, 2188), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((2454, 2471), 'pyinfraformat.core.utils.custom_float', 'custom_float', (['num'], {}), '(num)\n', (2466, 2471), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((2529, 2554), 'numpy.isnan', 'np.isnan', (['custom_floating'], {}), '(custom_floating)\n', (2537, 2554), True, 'import numpy as np\n'), ((441, 455), 'pyinfraformat.core.utils.is_number', 'is_number', 
(['num'], {}), '(num)\n', (450, 455), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((566, 578), 'pyinfraformat.core.utils.print_info', 'print_info', ([], {}), '()\n', (576, 578), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((598, 627), 'pyinfraformat.core.utils.print_info', 'print_info', ([], {'language': 'language'}), '(language=language)\n', (608, 627), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((658, 667), 'pyinfraformat.core.utils.info_fi', 'info_fi', ([], {}), '()\n', (665, 667), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((772, 806), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (785, 806), False, 'import pytest\n'), ((816, 836), 'pyinfraformat.core.utils.print_info', 'print_info', (['language'], {}), '(language)\n', (826, 836), False, 'from pyinfraformat.core.utils import custom_float, custom_int, info_fi, is_number, print_info\n'), ((1746, 1770), 'numpy.isnan', 'np.isnan', (['custom_integer'], {}), '(custom_integer)\n', (1754, 1770), True, 'import numpy as np\n'), ((2284, 2309), 'numpy.isnan', 'np.isnan', (['custom_floating'], {}), '(custom_floating)\n', (2292, 2309), True, 'import numpy as np\n')] |
from __future__ import print_function
from .cmseq import CMSEQ_DEFAULTS
from .cmseq import BamFile
import pandas as pd
import numpy as np
import argparse
def bd_from_file():
    """Command-line entry point: print breadth and depth of coverage per contig of a BAM file.

    Parses command-line options, opens the BAM file via ``BamFile`` and writes a
    tab-separated table (contig, breadth, mean depth, median depth) to stdout.
    With ``--combine`` a final row aggregating all contigs is appended.
    """
    parser = argparse.ArgumentParser(description="calculate the Breadth and Depth of coverage of BAMFILE.")
    parser.add_argument('BAMFILE', help='The file on which to operate')
    parser.add_argument('-c','--contig', help='Focus on a subset of references in the BAM file. Can be a list of references separated by commas or a FASTA file (the IDs are used to subset)', metavar="REFERENCE ID" ,default=None)
    parser.add_argument('-f', help='If set unmapped (FUNMAP), secondary (FSECONDARY), qc-fail (FQCFAIL) and duplicate (FDUP) are excluded. If unset ALL reads are considered (bedtools genomecov style). Default: unset',action='store_true')
    parser.add_argument('--sortindex', help='Sort and index the file',action='store_true')
    parser.add_argument('--minlen', help='Minimum Reference Length for a reference to be considered',default=CMSEQ_DEFAULTS.minlen, type=int)
    parser.add_argument('--minqual', help='Minimum base quality. Bases with quality score lower than this will be discarded. This is performed BEFORE --mincov. Default: 30', type=int, default=CMSEQ_DEFAULTS.minqual)
    parser.add_argument('--mincov', help='Minimum position coverage to perform the polymorphism calculation. Position with a lower depth of coverage will be discarded (i.e. considered as zero-coverage positions). This is calculated AFTER --minqual. Default: 1', type=int, default=CMSEQ_DEFAULTS.mincov)
    parser.add_argument('--truncate', help='Number of nucleotides that are truncated at either contigs end before calculating coverage values.', type=float, default=0)
    parser.add_argument('--combine', help='Combine all contigs into one giant contig and report it at the end', action='store_true')

    args = parser.parse_args()

    # store_true flags are already booleans; bool() just makes the intent explicit
    sort_and_index = bool(args.sortindex)
    stepper_mode = 'all' if args.f else 'nofilter'

    bam = BamFile(args.BAMFILE, sort=sort_and_index, index=sort_and_index, stepper=stepper_mode,
                  minlen=args.minlen, filtRefGenomes=args.contig, minimumReadsAligning=args.mincov)

    print('Contig\tBreadth\tDepth avg\tDepth median')

    combined_coverage = []
    for contig in bam.get_contigs_obj():
        bd_result = contig.breadth_and_depth_of_coverage(minqual=args.minqual, mincov=args.mincov, trunc=args.truncate)
        # Skip contigs for which every reported statistic is NaN
        if not all(np.isnan(value) for value in (bd_result[0], bd_result[1], bd_result[2])):
            print(f"{contig.name}\t{bd_result[0]}\t{bd_result[1]}\t{bd_result[2]}")
            if args.combine:
                combined_coverage.extend(bd_result[3])

    if args.combine:
        if np.all(np.isnan(combined_coverage)):
            print("all_contigs\t-\tNaN\tNaN")
        else:
            print(f"all_contigs\t-\t{np.nanmean(combined_coverage)}\t{np.nanmedian(combined_coverage)}")
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    bd_from_file()
| [
"numpy.nanmean",
"argparse.ArgumentParser",
"numpy.isnan",
"numpy.nanmedian"
] | [((189, 288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""calculate the Breadth and Depth of coverage of BAMFILE."""'}), "(description=\n 'calculate the Breadth and Depth of coverage of BAMFILE.')\n", (212, 288), False, 'import argparse\n'), ((2596, 2625), 'numpy.isnan', 'np.isnan', (['all_coverage_values'], {}), '(all_coverage_values)\n', (2604, 2625), True, 'import numpy as np\n'), ((2348, 2359), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2356, 2359), True, 'import numpy as np\n'), ((2776, 2809), 'numpy.nanmedian', 'np.nanmedian', (['all_coverage_values'], {}), '(all_coverage_values)\n', (2788, 2809), True, 'import numpy as np\n'), ((2732, 2763), 'numpy.nanmean', 'np.nanmean', (['all_coverage_values'], {}), '(all_coverage_values)\n', (2742, 2763), True, 'import numpy as np\n')] |
"""
Author : <NAME> (<EMAIL>)
Institution : Vrije Universiteit Brussel (VUB)
Date : November 2019
Main script for heat calculation and plotting
"""
#%%
# -------------------------------------------------------------------------
# PYTHON PACKAGES
# -------------------------------------------------------------------------
import os
import sys
sys.path.append(os.getcwd())
from cdo import Cdo
cdo = Cdo()
import xarray as xr
import numpy as np
import geopandas as gpd

# -------------------------------------------------------------------------
# CONFIGURATION
# -------------------------------------------------------------------------
# -----------------------------------------------------------
# FLAGS
# ------------------------------
# turn on/off parts of script
flag_preprocess = False # this is done on the cluster, using the same scripts
flag_interpolate_watertemp = False # make interpolation of CLM temperature fields. (takes time)
flag_calcheat = False # if false use saved lake heat (otherwise use saved lake heat), for ALBM done on the cluster.

# whether or not to save calculated lake heat (can only be true if flag_calcheat is true)
flag_savelakeheat = False

flag_get_values = True

flag_plotting_forcings = False

flag_plotting_paper = True

flag_plotting_input_maps = True

flag_save_plots = False

flag_do_evaluation = False

# -----------------------------
# scenarios

# flag to set which scenario is used for heat calculation
flag_scenario = 'climate' # 'climate' : only climate change (lake cover constant at 2005 level)
# 'reservoirs' : only reservoir construction (temperature constant at 1900 level)
# 'both' : reservoir construction and climate

# Reference to which period/year anomalies are calculated
flag_ref = 'pre-industrial' # 'pre-industrial': first 30 years (1900-1929 for start_year =1900)
#flag_ref = 1971 # 1971 or any integer: year as a reference

# -----------------------------------------------------------
# PATHS

basepath = os.getcwd()

indir = basepath + '/data/ISIMIP/OutputData/lakes_global/'
outdir = basepath + '/data/processed/'
plotdir= basepath + '/data/processed/plots/'
indir_lakedata = basepath + '/data/auxiliary_data/' # directory where lake fraction and depth are located

# paths on hydra (where preprocessing is done)
#project_name = 'isimip_lakeheat/'
#indir = '/gpfs/projects/climate/data/dataset/isimip/isimip2b/OutputData/lakes_global/'
#outdir = '/scratch/brussel/100/vsc10055/'+ project_name
#plotdir= '/scratch/brussel/100/vsc10055/'+ project_name + '/plots/'

# -----------------------------------------------------------
# MODELS & FORCINGS

models = [ 'CLM45','SIMSTRAT-UoG', 'ALBM']#,'VIC-LAKE','LAKE']
forcings = ['gfdl-esm2m','hadgem2-es','ipsl-cm5a-lr','miroc5']
experiments = ['historical','future']

# experiment used for future simulations (needed to differentiate between filenames)
future_experiment = 'rcp60'

variables = ['watertemp']

# -----------------------------------------------------------
# PERIODS
start_year = 1896
end_year = 2025

years_grand = range(1850,2018,1)
years_analysis = range(start_year,end_year,1)
years_pi = range(1861,1891,1)

# depending on model
years_isimip = {}
years_isimip['CLM45'] = range(1891,2030,1)
years_isimip['SIMSTRAT-UoG'] = range(1891,2030,1)
years_isimip['ALBM'] = range(1891,2030,1)

# -----------------------------------------------------------
# CONSTANTS

resolution = 0.5 # degrees

# constants values to check
cp_liq = 4.188e3 # [J/kg K] heat capacity liquid water
cp_ice = 2.11727e3 # [J/kg K] heat capacity ice
cp_salt= 3.993e3 # [J/kg K] heat capacity salt ocean water (not used)
l_fus = 3.337e5 # [J/kg] latent heat of fusion

rho_liq = 1000 # [kg/m3] density liquid water
rho_ice = 0.917e3 # [kg/m3] density ice

#%%
# -------------------------------------------------------------------------
# PREPROCESS raw ISIMIP variables
# Save them into annual timeseries for wanted period and store in correct folder
# -------------------------------------------------------------------------

if flag_preprocess:
    from preprocess_isimip import *
    preprocess_isimip(models, forcings, variables, experiments, future_experiment, indir, outdir)
    from preprocess_iceheat import *
    preprocess_iceheat()

#%%
# -------------------------------------------------------------------------
# INTERPOLATE lake temperatures of CLM45
# based on lakepct mask and saves interpolated watertemps into netcdf
# -------------------------------------------------------------------------

if flag_interpolate_watertemp:
    from interp_watertemp import *
    for model in models:
        interp_watertemp(indir_lakedata,outdir,forcings,future_experiment,model)

#%%
# -------------------------------------------------------------------------
# CALCULATE VOLUMES and LAKEHEAT
# loads hydrolakes + GLDB data to calculate lake volume per layer
# -------------------------------------------------------------------------

if flag_calcheat:
    #from calc_volumes import *
    from calc_lakeheat import *

    #volume_per_layer = calc_volume_per_layer(flag_scenario, indir_lakedata, years_grand, start_year,end_year, resolution, models,outdir)
    lakeheat = calc_lakeheat(models,forcings,future_experiment, indir_lakedata, years_grand, resolution,outdir, years_isimip,start_year, end_year, flag_scenario, flag_savelakeheat, rho_liq, cp_liq, rho_ice, cp_ice)

else:
    from load_lakeheat_albm import *

    # load from file based on scenario: (ALBM separate as these are calculated on HPC)
    # NOTE(review): allow_pickle expects a bool; the string 'TRUE' is truthy so this
    # works, but allow_pickle=True would be clearer.
    if flag_scenario == 'climate':
        lakeheat = np.load(outdir+'lakeheat_climate.npy',allow_pickle='TRUE').item()
        lakeheat_albm = load_lakeheat_albm(outdir,flag_scenario,years_analysis)
#        lakeheat_albm = load_lakeheat_albm(outdir,flag_scenario,years_analysis,forcings)

    elif flag_scenario == 'reservoirs':
        lakeheat = np.load(outdir+'lakeheat_reservoirs.npy',allow_pickle='TRUE').item()
        lakeheat_albm = load_lakeheat_albm(outdir,flag_scenario,years_analysis)

    elif flag_scenario == 'both':
        lakeheat = np.load(outdir+'lakeheat_both.npy',allow_pickle='TRUE').item()
        lakeheat_albm = load_lakeheat_albm(outdir,flag_scenario,years_analysis)

    # add ALBM dictionary to lakeheat dict.
    lakeheat.update(lakeheat_albm)

#%%
# -------------------------------------------------------------------------
# GET VALUES for paper
# -------------------------------------------------------------------------

if flag_get_values:
    from get_values_lakeheat import *
    get_values(outdir,flag_ref, years_analysis, indir_lakedata, resolution)

#%%
# -------------------------------------------------------------------------
# PLOTTING
# Do the plotting - works with internal flags
# data aggregation is done from within functions
# -------------------------------------------------------------------------

if flag_plotting_forcings:
    from plotting_lakeheat import *
    plot_forcings(flag_save_plots, plotdir, models,forcings, lakeheat, flag_ref, years_analysis,outdir)

if flag_plotting_paper:
    from plotting_lakeheat import *
    from plotting_casestudies import *
    do_plotting(flag_save_plots, plotdir, models , forcings, lakeheat, flag_ref, years_analysis,outdir)
    plot_forcings_allmodels(flag_save_plots, plotdir, models,forcings, lakeheat, flag_ref, years_analysis,outdir)
    plot_casestudies(basepath,indir_lakedata,outdir,flag_ref,years_analysis)

if flag_plotting_input_maps: # plotting of lake/reservoir area fraction and lake depth
    from plotting_globalmaps import *
    do_plotting_globalmaps(indir_lakedata, plotdir, years_grand,start_year,end_year)

#%%
# -------------------------------------------------------------------------
# EVALUATION
#
# Do spot evaluations
# -------------------------------------------------------------------------

if flag_do_evaluation:
    from preprocess_obs import *
    from do_evaluation import *
    preprocess_obs(basepath)
    do_evaluation()
| [
"os.getcwd",
"cdo.Cdo",
"numpy.load"
] | [((417, 422), 'cdo.Cdo', 'Cdo', ([], {}), '()\n', (420, 422), False, 'from cdo import Cdo\n'), ((2078, 2089), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2087, 2089), False, 'import os\n'), ((377, 388), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (386, 388), False, 'import os\n'), ((5760, 5821), 'numpy.load', 'np.load', (["(outdir + 'lakeheat_climate.npy')"], {'allow_pickle': '"""TRUE"""'}), "(outdir + 'lakeheat_climate.npy', allow_pickle='TRUE')\n", (5767, 5821), True, 'import numpy as np\n'), ((6063, 6127), 'numpy.load', 'np.load', (["(outdir + 'lakeheat_reservoirs.npy')"], {'allow_pickle': '"""TRUE"""'}), "(outdir + 'lakeheat_reservoirs.npy', allow_pickle='TRUE')\n", (6070, 6127), True, 'import numpy as np\n'), ((6274, 6332), 'numpy.load', 'np.load', (["(outdir + 'lakeheat_both.npy')"], {'allow_pickle': '"""TRUE"""'}), "(outdir + 'lakeheat_both.npy', allow_pickle='TRUE')\n", (6281, 6332), True, 'import numpy as np\n')] |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.graphics"
__author__ = "<NAME>"
__all__ = ["plot_sequence_logo"]
import numpy as np
from ...visualize import set_font_size_in_coord
from ..alphabet import LetterAlphabet
from .colorschemes import get_color_scheme
import warnings
from ..align import Alignment
from .. import SequenceProfile
def plot_sequence_logo(axes, profile, scheme=None, **kwargs):
    """
    Create a sequence logo. :footcite:`Schneider1990`

    A sequence logo is visualizes the positional composition and
    conservation of a profile encoded in the size of the letters.
    Each position displays all symbols that are occurring at this
    position stacked on each other, with their relative heights depicting
    their relative frequency.
    The height of such a stack depicts its conservation.
    It is the maximum possible Shannon entropy of the alphabet
    subtracted by the positional entropy.

    Parameters
    ----------
    axes : Axes
        The axes to draw the logo one.
    profile: SequenceProfile
        The logo is created based on this profile.
    scheme : str or list of (tuple or str)
        Either a valid color scheme name
        (e.g. ``"rainbow"``, ``"clustalx"``, ``blossom``, etc.)
        or a list of *Matplotlib* compatible colors.
        The list length must be at least as long as the
        length of the alphabet used by the `profile`.
    **kwargs
        Additional `text parameters <https://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_.

    References
    ----------

    .. footbibliography::
    """
    if isinstance(profile, Alignment):
        warnings.warn("Using an alignment for this method is deprecated; use a profile instead", DeprecationWarning)
        profile = SequenceProfile.from_alignment(profile)
    alphabet = profile.alphabet
    if not isinstance(alphabet, LetterAlphabet):
        raise TypeError("The sequences' alphabet must be a letter alphabet")
    if scheme is None:
        colors = get_color_scheme("rainbow", alphabet)
    elif isinstance(scheme, str):
        colors = get_color_scheme(scheme, alphabet)
    else:
        colors = scheme
    # 'color' and 'size' property is not passed on to text
    kwargs.pop("color", None)
    kwargs.pop("size", None)

    frequencies, entropies, max_entropy = _get_entropy(profile)
    stack_heights = (max_entropy - entropies)
    symbols_heights = stack_heights[:, np.newaxis] * frequencies
    # Sort the symbols of each position by their height once before the loop;
    # previously the full argsort was redundantly recomputed on every
    # iteration (np.argsort defaults to the last axis, so the result per
    # position is identical).
    index_order = np.argsort(symbols_heights, axis=1)
    for i in range(symbols_heights.shape[0]):
        # Iterate over the alignment columns
        start_height = 0
        for j in index_order[i]:
            # Stack the symbols at position on top of the preceeding one
            height = symbols_heights[i,j]
            if height > 0:
                symbol = alphabet.decode(j)
                text = axes.text(
                    i+0.5, start_height, symbol,
                    ha="left", va="bottom", color=colors[j],
                    # Best results are obtained with this font size
                    size=1,
                    **kwargs
                )
                text.set_clip_on(True)
                set_font_size_in_coord(text, width=1, height=height)
                start_height += height

    axes.set_xlim(0.5, len(profile.symbols)+0.5)
    axes.set_ylim(0, max_entropy)
def _get_entropy(profile):
    """Return per-position frequencies, Shannon entropies and the maximum entropy.

    The symbol counts of the profile are normalized to relative frequencies
    per position; the entropy of each position is computed from them, and the
    maximum possible entropy is derived from the alphabet size.
    """
    counts = profile.symbols
    frequencies = counts / np.sum(counts, axis=1)[:, np.newaxis]
    # p * log2(p): positions with p == 0 produce NaN (0 * -inf), which is
    # mapped back to 0 since the limit of p*log2(p) for p -> 0 is 0
    with np.errstate(divide="ignore", invalid="ignore"):
        plogp = frequencies * np.log2(frequencies)
    entropies = -np.sum(np.nan_to_num(plogp), axis=1)
    max_entropy = np.log2(len(profile.alphabet))
    return frequencies, entropies, max_entropy
"numpy.sum",
"numpy.log2",
"numpy.zeros",
"numpy.argsort",
"warnings.warn"
] | [((2665, 2700), 'numpy.argsort', 'np.argsort', (['symbols_heights'], {'axis': '(1)'}), '(symbols_heights, axis=1)\n', (2675, 2700), True, 'import numpy as np\n'), ((3801, 3821), 'numpy.zeros', 'np.zeros', (['freq.shape'], {}), '(freq.shape)\n', (3809, 3821), True, 'import numpy as np\n'), ((1818, 1935), 'warnings.warn', 'warnings.warn', (['"""Using an alignment for this method is deprecated; use a profile instead"""', 'DeprecationWarning'], {}), "(\n 'Using an alignment for this method is deprecated; use a profile instead',\n DeprecationWarning)\n", (1831, 1935), False, 'import warnings\n'), ((2814, 2841), 'numpy.argsort', 'np.argsort', (['symbols_heights'], {}), '(symbols_heights)\n', (2824, 2841), True, 'import numpy as np\n'), ((3879, 3902), 'numpy.log2', 'np.log2', (['freq[no_zeros]'], {}), '(freq[no_zeros])\n', (3886, 3902), True, 'import numpy as np\n'), ((3920, 3949), 'numpy.sum', 'np.sum', (['pre_entropies'], {'axis': '(1)'}), '(pre_entropies, axis=1)\n', (3926, 3949), True, 'import numpy as np\n'), ((3678, 3698), 'numpy.sum', 'np.sum', (['freq'], {'axis': '(1)'}), '(freq, axis=1)\n', (3684, 3698), True, 'import numpy as np\n')] |
from env_suite.envs import controlTableLine
import time
import numpy as np

# Platform probe: try to import the Windows console API; fall back to the
# POSIX terminal modules when unavailable.
# NOTE(review): `global` at module scope is a no-op; kept as-is.
global isWindows
isWindows = False
try:
    from win32api import STD_INPUT_HANDLE
    from win32console import GetStdHandle, KEY_EVENT, ENABLE_ECHO_INPUT, ENABLE_LINE_INPUT, ENABLE_PROCESSED_INPUT
    isWindows = True
except ImportError as e:
    import sys
    import select
    import termios
class KeyPoller():
    """Context manager for non-blocking single-keystroke polling.

    Uses the win32 console API on Windows and raw (unbuffered, no-echo)
    terminal mode via ``termios`` on POSIX systems.
    """

    def __enter__(self):
        """Put the console/terminal in a state suitable for polling and return self."""
        global isWindows
        if isWindows:
            self.readHandle = GetStdHandle(STD_INPUT_HANDLE)
            self.readHandle.SetConsoleMode(ENABLE_LINE_INPUT|ENABLE_ECHO_INPUT|ENABLE_PROCESSED_INPUT)

            self.curEventLength = 0
            self.curKeysLength = 0

            self.capturedChars = []
        else:
            # Save the terminal settings
            self.fd = sys.stdin.fileno()
            self.new_term = termios.tcgetattr(self.fd)
            self.old_term = termios.tcgetattr(self.fd)

            # New terminal setting unbuffered
            self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)

        return self

    def __exit__(self, type, value, traceback):
        """Restore the saved terminal settings (no-op on Windows)."""
        if isWindows:
            pass
        else:
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)

    def poll(self):
        """Return one pending character, or None if no key press is available."""
        if isWindows:
            if not len(self.capturedChars) == 0:
                return self.capturedChars.pop(0)

            # Peek at pending console events without consuming them
            eventsPeek = self.readHandle.PeekConsoleInput(10000)

            if len(eventsPeek) == 0:
                return None

            if not len(eventsPeek) == self.curEventLength:
                for curEvent in eventsPeek[self.curEventLength:]:
                    if curEvent.EventType == KEY_EVENT:
                        # Skip non-character events and key releases
                        if ord(curEvent.Char) == 0 or not curEvent.KeyDown:
                            pass
                        else:
                            curChar = str(curEvent.Char)
                            self.capturedChars.append(curChar)
                self.curEventLength = len(eventsPeek)

            if not len(self.capturedChars) == 0:
                return self.capturedChars.pop(0)
            else:
                return None
        else:
            # Zero-timeout select: returns immediately whether or not input is ready
            dr,dw,de = select.select([sys.stdin], [], [], 0)
            if not dr == []:
                return sys.stdin.read(1)
            return None
if __name__ == "__main__":
    # Interactive driver: steer the environment with w/a/s/d, quit with 'c'.
    env = controlTableLine()
    env.reset()
    env.render()
    with KeyPoller() as keyPoller:
        reward_sum = 0
        steps = 0
        while True:
            c = keyPoller.poll()
            # NOTE(review): `done` is set here but immediately overwritten by env.step below
            done = False
            # Default action when no key is pressed: stay still
            action = np.zeros((2,), dtype=np.float32)
            if not c is None:
                if c == "c":
                    break
                elif c == "d":
                    action = np.array([1, 0], dtype=np.float32)
                elif c == "w":
                    action = np.array([0, 1], dtype=np.float32)
                elif c == "a":
                    action = np.array([-1, 0], dtype=np.float32)
                elif c == "s":
                    action = np.array([0, -1], dtype=np.float32)

            obs, reward, done, _ = env.step(action)
            reward_sum += reward
            steps += 1
            print(f"Reward: {reward}, steps: {steps}, state: {obs}")
            env.render()
            if done:
                # Episode finished: reset counters and restart after a short pause
                env.reset()
                reward_sum = 0
                steps = 0
                time.sleep(0.5)
                env.render()
"sys.stdin.read",
"termios.tcgetattr",
"env_suite.envs.controlTableLine",
"win32console.GetStdHandle",
"numpy.zeros",
"termios.tcsetattr",
"time.sleep",
"select.select",
"numpy.array",
"sys.stdin.fileno"
] | [((2464, 2482), 'env_suite.envs.controlTableLine', 'controlTableLine', ([], {}), '()\n', (2480, 2482), False, 'from env_suite.envs import controlTableLine\n'), ((495, 525), 'win32console.GetStdHandle', 'GetStdHandle', (['STD_INPUT_HANDLE'], {}), '(STD_INPUT_HANDLE)\n', (507, 525), False, 'from win32console import GetStdHandle, KEY_EVENT, ENABLE_ECHO_INPUT, ENABLE_LINE_INPUT, ENABLE_PROCESSED_INPUT\n'), ((815, 833), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (831, 833), False, 'import sys\n'), ((862, 888), 'termios.tcgetattr', 'termios.tcgetattr', (['self.fd'], {}), '(self.fd)\n', (879, 888), False, 'import termios\n'), ((917, 943), 'termios.tcgetattr', 'termios.tcgetattr', (['self.fd'], {}), '(self.fd)\n', (934, 943), False, 'import termios\n'), ((1087, 1147), 'termios.tcsetattr', 'termios.tcsetattr', (['self.fd', 'termios.TCSAFLUSH', 'self.new_term'], {}), '(self.fd, termios.TCSAFLUSH, self.new_term)\n', (1104, 1147), False, 'import termios\n'), ((1283, 1343), 'termios.tcsetattr', 'termios.tcsetattr', (['self.fd', 'termios.TCSAFLUSH', 'self.old_term'], {}), '(self.fd, termios.TCSAFLUSH, self.old_term)\n', (1300, 1343), False, 'import termios\n'), ((2294, 2331), 'select.select', 'select.select', (['[sys.stdin]', '[]', '[]', '(0)'], {}), '([sys.stdin], [], [], 0)\n', (2307, 2331), False, 'import select\n'), ((2691, 2723), 'numpy.zeros', 'np.zeros', (['(2,)'], {'dtype': 'np.float32'}), '((2,), dtype=np.float32)\n', (2699, 2723), True, 'import numpy as np\n'), ((2384, 2401), 'sys.stdin.read', 'sys.stdin.read', (['(1)'], {}), '(1)\n', (2398, 2401), False, 'import sys\n'), ((3515, 3530), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3525, 3530), False, 'import time\n'), ((2869, 2903), 'numpy.array', 'np.array', (['[1, 0]'], {'dtype': 'np.float32'}), '([1, 0], dtype=np.float32)\n', (2877, 2903), True, 'import numpy as np\n'), ((2964, 2998), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', 
(2972, 2998), True, 'import numpy as np\n'), ((3059, 3094), 'numpy.array', 'np.array', (['[-1, 0]'], {'dtype': 'np.float32'}), '([-1, 0], dtype=np.float32)\n', (3067, 3094), True, 'import numpy as np\n'), ((3155, 3190), 'numpy.array', 'np.array', (['[0, -1]'], {'dtype': 'np.float32'}), '([0, -1], dtype=np.float32)\n', (3163, 3190), True, 'import numpy as np\n')] |
from sys import exit
import numpy as np
import h5py
from .plot import Plot
from .utils import load_posteriors, create_templates
from .conf import set_plot_params
class Validation:
    """
    Internal class for performing validation on the univariate and/or multivariate posterior PDFs of the test samples
    generated by the trained model.

    The marginal PDFs are validated using the framework developed by Gneiting et al. (2007)
    (https://hal.archives-ouvertes.fr/file/index/docid/363242/filename/jrssb1b.pdf).
    The multivariate PDFs are validated using the multivariate extension of the framework developed by
    Ziegel and Gneiting. (2014) (https://projecteuclid.org/download/pdfview_1/euclid.ejs/1418313582).

    Parameters
    ----------
    y_test: array_like
        An array of target features of testing galaxies with the same shape as y_train.
    y_pred: array_like
        Point predictions for the testing galaxies.
    posteriors: dict-like or None
        Mapping from sample index (as a string) to an array of posterior draws
        of shape (no_draws, no_features); loaded from disk when None.
    validation: h5py.File or None
        Previously computed validation results, if any.
    target_features: list
        A list of variables of target features.
    no_samples: int
        Number of test samples.
    no_features: int
        Number of target features.
    path: str
        Location of the model directory.
    """

    def __init__(self, y_test, y_pred, posteriors, validation, target_features, no_samples, no_features, path):

        # Initialise arguments
        self.y_test = y_test
        self.y_pred = y_pred
        self.posteriors = posteriors
        self.validation = validation
        self.target_features = target_features
        self.no_samples = no_samples
        self.no_features = no_features
        self.path = path

        # Internal parameters
        self.no_points = set_plot_params()[0]  # number of evaluation points for calibration curves
        self.posterior_folder = 'posteriors/'
        self.validation_folder = 'validation/'

        # Initialise classes
        self.plot = Plot(y_test=self.y_test, y_pred=self.y_pred, posteriors=self.posteriors, validation=self.validation,
                         target_features=self.target_features, no_samples=self.no_samples, no_features=self.no_features,
                         path=self.path)

    def validate(self, save_validation=False, make_plots=False):
        """Top-level function for performing all modes of validation.

        Runs PIT/marginal calibration (and, for multivariate targets, copPIT and
        kendall calibration), optionally saves the results and creates plots.
        Returns the (optionally backed) h5py validation file.
        """
        if self.y_test is None:
            print('Provide y_test to perform validation.')
            exit()

        # Load posteriors
        if self.posteriors is None:
            self.posteriors = load_posteriors(path=self.path)

        # Validation file (in-memory unless save_validation is True)
        validation = h5py.File(self.path + self.validation_folder + "validation.h5", 'w', driver='core',
                               backing_store=save_validation)

        # Run validation
        pits = self.probabilistic_calibration()
        marginal_calibration = self.marginal_calibration()
        validation.create_dataset('pits', data=pits)
        validation.create_dataset('marginal_calibration', data=marginal_calibration)

        if self.no_features > 1:
            coppits, pred_cdf_full, true_cdf_full = self.probabilistic_copula_calibration()
            kendall_calibration = self.kendall_calibration(pred_cdf_full, true_cdf_full)
            validation.create_dataset('coppits', data=coppits)
            validation.create_dataset('kendall_calibration', data=kendall_calibration)

        # Create plots
        self.plot.validation = validation
        if make_plots:
            print('Creating PIT plots...')
            self.plot.plot_pit()
            print('Creating marginal calibration plots...')
            self.plot.plot_marginal_calibration()
            if self.no_features > 1:
                print('Creating copPIT plots...')
                self.plot.plot_coppit()
                print('Creating kendall calibration plots...')
                self.plot.plot_kendall_calibration()

        if save_validation:
            print('Saved validation. Any previously saved validation has been overwritten.')

        return validation

    def probabilistic_calibration(self):
        """Performs probabilistic calibration.

        Returns an array of shape (no_samples, no_features) with the PIT value
        (empirical CDF of the posterior evaluated at the true value) per sample
        and feature.
        """
        pits = np.empty((self.no_samples, self.no_features))
        for sample in np.arange(self.no_samples):
            posterior = self.posteriors[str(sample)][:]
            pits[sample] = np.sum(posterior <= self.y_test[sample], axis=0) / posterior.shape[0]
        print('Completed probabilistic calibration.')
        return pits

    def marginal_calibration(self):
        """Performs marginal calibration.

        Returns an array stacking the evaluation points and the difference
        between the average predicted marginal CDF and the empirical CDF of
        the true values at those points.
        """
        points = np.linspace(np.floor(np.min(self.y_test, axis=0)), np.ceil(np.max(self.y_test, axis=0)), self.no_points)
        marginal_calibration = np.empty((self.no_points, self.no_features))
        for point in np.arange(self.no_points):
            probs = np.zeros(self.no_features)
            for sample in np.arange(self.no_samples):
                posterior = self.posteriors[str(sample)][:]
                probs += np.sum(posterior <= points[point], axis=0) / posterior.shape[0]
            pred_cdf_marg_point = probs / self.no_samples
            true_cdf_marg_point = np.sum(self.y_test <= points[point], axis=0) / self.no_samples
            marginal_calibration[point] = pred_cdf_marg_point - true_cdf_marg_point
        marginal_calibration = np.stack((points, marginal_calibration))
        print('Completed marginal calibration.')
        return marginal_calibration

    def probabilistic_copula_calibration(self):
        """Performs probabilistic copula calibration.

        Returns the copPIT values per sample along with the per-sample lists of
        predicted CDF values and the list of true CDF values.
        """
        # Creating a list of list containing pred_cdf of each point in predictions
        pred_cdf_full = [[] for i in np.arange(self.no_samples)]
        true_cdf_full = []
        coppits = np.empty(self.no_samples)
        template_pred, template_true, template_same = create_templates(no_features=self.no_features)

        for sample in np.arange(self.no_samples):
            posterior = self.posteriors[str(sample)][:]
            no_preds = posterior.shape[0]
            for pred in np.arange(no_preds):
                # For point at edges, if <= used, then point counts and cdf is never 0.
                # If <= is used, a large number of point will have near 0 probability, as a result, there will
                # be a peak at 0.
                # -1 insures, the point in consideration does not count when determining cdf.
                same_preds = np.sum(eval(template_same))
                pred_cdf_full[sample].append(np.sum(eval(template_pred)) / (no_preds - same_preds))
            true_cdf_full.append(np.sum(eval(template_true)) / no_preds)
            # BUGFIX: pred_cdf_full[sample] is a plain Python list; comparing a
            # list to a float with <= raises TypeError, so convert to an array first.
            coppits[sample] = np.sum(np.asarray(pred_cdf_full[sample]) <= true_cdf_full[sample]) / no_preds
        print('Completed probabilistic copula calibration.')
        return coppits, pred_cdf_full, true_cdf_full

    def kendall_calibration(self, pred_cdf_full, true_cdf_full):
        """Performs kendall calibration.

        Returns an array of length no_points with the difference between the
        empirical Kendall function of the predicted CDF values and the
        empirical CDF of the true CDF values, evaluated on [0, 1].
        """
        kendall_calibration = np.empty(self.no_points)
        # BUGFIX: the inputs are plain Python lists; comparing a list to a float
        # with <= raises TypeError, so convert to arrays before comparing.
        true_cdf_arr = np.asarray(true_cdf_full)
        count = 0
        for point in np.linspace(0, 1, self.no_points):
            sum_ = np.zeros(self.no_samples)
            for sample in np.arange(self.no_samples):
                sample_cdf = np.asarray(pred_cdf_full[sample])
                sum_[sample] = np.sum(sample_cdf <= point) / len(pred_cdf_full[sample])
            kendall_func_point = np.sum(sum_) / self.no_samples
            true_cdf_point = np.sum(true_cdf_arr <= point) / self.no_samples
            kendall_calibration[count] = kendall_func_point - true_cdf_point
            count += 1
        print('Completed kendall calibration.')
        return kendall_calibration
| [
"numpy.stack",
"h5py.File",
"numpy.sum",
"numpy.empty",
"numpy.zeros",
"numpy.min",
"numpy.max",
"numpy.arange",
"numpy.linspace",
"sys.exit"
] | [((2339, 2458), 'h5py.File', 'h5py.File', (["(self.path + self.validation_folder + 'validation.h5')", '"""w"""'], {'driver': '"""core"""', 'backing_store': 'save_validation'}), "(self.path + self.validation_folder + 'validation.h5', 'w', driver\n ='core', backing_store=save_validation)\n", (2348, 2458), False, 'import h5py\n'), ((3895, 3940), 'numpy.empty', 'np.empty', (['(self.no_samples, self.no_features)'], {}), '((self.no_samples, self.no_features))\n', (3903, 3940), True, 'import numpy as np\n'), ((3964, 3990), 'numpy.arange', 'np.arange', (['self.no_samples'], {}), '(self.no_samples)\n', (3973, 3990), True, 'import numpy as np\n'), ((4455, 4499), 'numpy.empty', 'np.empty', (['(self.no_points, self.no_features)'], {}), '((self.no_points, self.no_features))\n', (4463, 4499), True, 'import numpy as np\n'), ((4522, 4547), 'numpy.arange', 'np.arange', (['self.no_points'], {}), '(self.no_points)\n', (4531, 4547), True, 'import numpy as np\n'), ((5072, 5112), 'numpy.stack', 'np.stack', (['(points, marginal_calibration)'], {}), '((points, marginal_calibration))\n', (5080, 5112), True, 'import numpy as np\n'), ((5497, 5522), 'numpy.empty', 'np.empty', (['self.no_samples'], {}), '(self.no_samples)\n', (5505, 5522), True, 'import numpy as np\n'), ((5647, 5673), 'numpy.arange', 'np.arange', (['self.no_samples'], {}), '(self.no_samples)\n', (5656, 5673), True, 'import numpy as np\n'), ((6728, 6752), 'numpy.empty', 'np.empty', (['self.no_points'], {}), '(self.no_points)\n', (6736, 6752), True, 'import numpy as np\n'), ((6793, 6826), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.no_points'], {}), '(0, 1, self.no_points)\n', (6804, 6826), True, 'import numpy as np\n'), ((2159, 2165), 'sys.exit', 'exit', ([], {}), '()\n', (2163, 2165), False, 'from sys import exit\n'), ((4569, 4595), 'numpy.zeros', 'np.zeros', (['self.no_features'], {}), '(self.no_features)\n', (4577, 4595), True, 'import numpy as np\n'), ((4623, 4649), 'numpy.arange', 'np.arange', 
(['self.no_samples'], {}), '(self.no_samples)\n', (4632, 4649), True, 'import numpy as np\n'), ((5798, 5817), 'numpy.arange', 'np.arange', (['no_preds'], {}), '(no_preds)\n', (5807, 5817), True, 'import numpy as np\n'), ((6847, 6872), 'numpy.zeros', 'np.zeros', (['self.no_samples'], {}), '(self.no_samples)\n', (6855, 6872), True, 'import numpy as np\n'), ((6899, 6925), 'numpy.arange', 'np.arange', (['self.no_samples'], {}), '(self.no_samples)\n', (6908, 6925), True, 'import numpy as np\n'), ((4075, 4123), 'numpy.sum', 'np.sum', (['(posterior <= self.y_test[sample])'], {'axis': '(0)'}), '(posterior <= self.y_test[sample], axis=0)\n', (4081, 4123), True, 'import numpy as np\n'), ((4340, 4367), 'numpy.min', 'np.min', (['self.y_test'], {'axis': '(0)'}), '(self.y_test, axis=0)\n', (4346, 4367), True, 'import numpy as np\n'), ((4378, 4405), 'numpy.max', 'np.max', (['self.y_test'], {'axis': '(0)'}), '(self.y_test, axis=0)\n', (4384, 4405), True, 'import numpy as np\n'), ((4893, 4937), 'numpy.sum', 'np.sum', (['(self.y_test <= points[point])'], {'axis': '(0)'}), '(self.y_test <= points[point], axis=0)\n', (4899, 4937), True, 'import numpy as np\n'), ((5424, 5450), 'numpy.arange', 'np.arange', (['self.no_samples'], {}), '(self.no_samples)\n', (5433, 5450), True, 'import numpy as np\n'), ((6407, 6461), 'numpy.sum', 'np.sum', (['(pred_cdf_full[sample] <= true_cdf_full[sample])'], {}), '(pred_cdf_full[sample] <= true_cdf_full[sample])\n', (6413, 6461), True, 'import numpy as np\n'), ((7060, 7072), 'numpy.sum', 'np.sum', (['sum_'], {}), '(sum_)\n', (7066, 7072), True, 'import numpy as np\n'), ((7120, 7150), 'numpy.sum', 'np.sum', (['(true_cdf_full <= point)'], {}), '(true_cdf_full <= point)\n', (7126, 7150), True, 'import numpy as np\n'), ((4736, 4778), 'numpy.sum', 'np.sum', (['(posterior <= points[point])'], {'axis': '(0)'}), '(posterior <= points[point], axis=0)\n', (4742, 4778), True, 'import numpy as np\n'), ((6958, 6996), 'numpy.sum', 'np.sum', (['(pred_cdf_full[sample] 
<= point)'], {}), '(pred_cdf_full[sample] <= point)\n', (6964, 6996), True, 'import numpy as np\n')] |
import copy
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from libact.base.dataset import Dataset, import_libsvm_sparse
from libact.query_strategies.random_sampling import RandomSampling
from libact.query_strategies.uncertainty_sampling import UncertaintySampling
from libact.query_strategies.variance_reduction import VarianceReduction
from libact.labelers.ideal_labeler import IdealLabeler
from libact.models.svm import SVM
from libact.models import *
from matplotlib import pyplot as plt
def run(trn_ds, tst_ds, lbr, model, qs, quota, batch_size):
    """Run the active-learning loop for quota//batch_size batches.

    Each batch queries `batch_size` samples via the query strategy `qs`,
    labels them with the oracle `lbr`, retrains `model`, and records the
    error rate (1 - score) on the training and test datasets.

    Returns:
        (E_in, E_out): arrays of per-batch training / test error rates.
    """
    train_err, test_err = [], []
    n_batches = quota // batch_size
    for _ in range(n_batches):
        # Query and label a full batch before retraining.
        for _ in range(batch_size):
            query_idx = qs.make_query()
            features, _ = zip(*trn_ds.data)
            picked_label = lbr.label(features[query_idx])
            trn_ds.update(query_idx, picked_label)
        model.train(trn_ds)
        # np.append promotes the initial lists to float arrays.
        train_err = np.append(train_err, 1 - model.score(trn_ds))
        test_err = np.append(test_err, 1 - model.score(tst_ds))
    return train_err, test_err
def split_train_test(dataset_filepath, test_size, n_labeled):
    """Load the embedded-phrase data, split it, and build libact Datasets.

    NOTE(review): `dataset_filepath` is currently ignored — the function
    loads hard-coded .npy files instead (the libsvm loader is commented
    out below). Confirm whether the parameter should be re-wired.

    Returns:
        (trn_ds, tst_ds, y_train, fully_labeled_trn_ds) where trn_ds has
        only the first n_labeled labels revealed (the rest are None) and
        fully_labeled_trn_ds keeps every training label (for the oracle).
    """
    #X, y = import_libsvm_sparse(dataset_filepath).format_sklearn()
    # Only the first 2000 samples are used, presumably to keep runtime low.
    X = np.load('data/phrases_50dim.npy')[:2000]
    y = np.load('data/sentiments_50dim.npy')[:2000]
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=test_size)
    # Hide all but the first n_labeled labels: None marks "unlabeled" in libact.
    trn_ds = Dataset(X_train, np.concatenate(
        [y_train[:n_labeled], [None] * (len(y_train) - n_labeled)]))
    tst_ds = Dataset(X_test, y_test)
    fully_labeled_trn_ds = Dataset(X_train, y_train)
    return trn_ds, tst_ds, y_train, fully_labeled_trn_ds
def main():
    """Compare UncertaintySampling against RandomSampling and plot the
    learning curves (saved via the non-interactive Agg backend)."""
    # Specify the parameters here:
    # path to your binary classification dataset
    # NOTE(review): this path is ignored by split_train_test, which loads
    # hard-coded .npy files — confirm intent.
    dataset_filepath = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'diabetes.txt')
    test_size = 0.33    # the percentage of samples in the dataset that will be
    # randomly selected and assigned to the test set
    n_labeled = 316      # number of samples that are initially labeled
    batch_size = 32
    # Load dataset
    trn_ds, tst_ds, y_train, fully_labeled_trn_ds = \
        split_train_test(dataset_filepath, test_size, n_labeled)
    # Deep copy so both strategies start from identical training sets.
    trn_ds2 = copy.deepcopy(trn_ds)
    lbr = IdealLabeler(fully_labeled_trn_ds)
    quota = len(y_train) - n_labeled    # number of samples to query
    # Comparing UncertaintySampling strategy with RandomSampling.
    # model is the base learner, e.g. LogisticRegression, SVM ... etc.
    qs = UncertaintySampling(trn_ds, method='lc', model=LogisticRegression())
    model = LogisticRegression()
    E_in_1, E_out_1 = run(trn_ds, tst_ds, lbr, model, qs, quota, batch_size)
    qs2 = RandomSampling(trn_ds2)
    model = LogisticRegression()
    E_in_2, E_out_2 = run(trn_ds2, tst_ds, lbr, model, qs2, quota, batch_size)
    # Plot the learning curve of UncertaintySampling to RandomSampling
    # The x-axis is the number of queries, and the y-axis is the corresponding
    # error rate.
    query_num = np.arange(1, quota + 1)
    print('plotting')
    print(E_in_1, E_out_1, E_in_2, E_out_2)
    plt.plot(query_num, E_in_1, 'b', label='qs Ein')
    plt.plot(query_num, E_in_2, 'r', label='random Ein')
    plt.plot(query_num, E_out_1, 'g', label='qs Eout')
    plt.plot(query_num, E_out_2, 'k', label='random Eout')
    plt.xlabel('Number of Queries')
    plt.ylabel('Error')
    plt.title('Experiment Result')
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
               fancybox=True, shadow=True, ncol=5)
    plt.show()
if __name__ == '__main__':
    main()
# NOTE: the triple-quoted block below is disabled legacy code (an earlier
# data-preparation / SVM experiment) kept only for reference.
'''
if __name__ == "__main__":
    f = open('data/dictionary.txt', 'r').readlines()
    phrases = dict()
    for line in f:
        phrase, idx = line.strip().split('|')
        phrases[int(idx)] = phrase
    sentiments = dict()
    f = open('data/sentiment_labels.txt', 'r').readlines()
    for line in f[1:]:
        idx, senti = line.strip().split('|')
        sentiments[int(idx)] = 0 if float(senti) < 0.5 else 1
    ## remove some labels
    sparse = sentiments.copy()
    for i in range(len(sentiments))[::2]:
        sparse[i] = None
    dataset = Dataset(list(phrases.values()), list(sparse.values()))
    query_strategy = RandomSampling(dataset)
    labeler = IdealLabeler(Dataset(list(phrases.values()),list(sentiments.values())))
    model = SVM()
    num_queries = 10
    for i in range(num_queries):
        q_id = query_strategy.make_query()
        label = labeler.label(dataset.data[q_id][0])
        dataset.update(q_id, 1)
    model.train(dataset)
'''
| [
"matplotlib.pyplot.title",
"sklearn.cross_validation.train_test_split",
"copy.deepcopy",
"numpy.load",
"matplotlib.pyplot.show",
"libact.labelers.ideal_labeler.IdealLabeler",
"libact.base.dataset.Dataset",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"os.path.realpath",
"matplotlib.use"... | [((59, 80), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (73, 80), False, 'import matplotlib\n'), ((1458, 1501), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size'}), '(X, y, test_size=test_size)\n', (1474, 1501), False, 'from sklearn.cross_validation import train_test_split\n'), ((1630, 1653), 'libact.base.dataset.Dataset', 'Dataset', (['X_test', 'y_test'], {}), '(X_test, y_test)\n', (1637, 1653), False, 'from libact.base.dataset import Dataset, import_libsvm_sparse\n'), ((1681, 1706), 'libact.base.dataset.Dataset', 'Dataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1688, 1706), False, 'from libact.base.dataset import Dataset, import_libsvm_sparse\n'), ((2347, 2368), 'copy.deepcopy', 'copy.deepcopy', (['trn_ds'], {}), '(trn_ds)\n', (2360, 2368), False, 'import copy\n'), ((2379, 2413), 'libact.labelers.ideal_labeler.IdealLabeler', 'IdealLabeler', (['fully_labeled_trn_ds'], {}), '(fully_labeled_trn_ds)\n', (2391, 2413), False, 'from libact.labelers.ideal_labeler import IdealLabeler\n'), ((2821, 2844), 'libact.query_strategies.random_sampling.RandomSampling', 'RandomSampling', (['trn_ds2'], {}), '(trn_ds2)\n', (2835, 2844), False, 'from libact.query_strategies.random_sampling import RandomSampling\n'), ((3142, 3165), 'numpy.arange', 'np.arange', (['(1)', '(quota + 1)'], {}), '(1, quota + 1)\n', (3151, 3165), True, 'import numpy as np\n'), ((3236, 3284), 'matplotlib.pyplot.plot', 'plt.plot', (['query_num', 'E_in_1', '"""b"""'], {'label': '"""qs Ein"""'}), "(query_num, E_in_1, 'b', label='qs Ein')\n", (3244, 3284), True, 'from matplotlib import pyplot as plt\n'), ((3289, 3341), 'matplotlib.pyplot.plot', 'plt.plot', (['query_num', 'E_in_2', '"""r"""'], {'label': '"""random Ein"""'}), "(query_num, E_in_2, 'r', label='random Ein')\n", (3297, 3341), True, 'from matplotlib import pyplot as plt\n'), ((3346, 3396), 'matplotlib.pyplot.plot', 'plt.plot', 
(['query_num', 'E_out_1', '"""g"""'], {'label': '"""qs Eout"""'}), "(query_num, E_out_1, 'g', label='qs Eout')\n", (3354, 3396), True, 'from matplotlib import pyplot as plt\n'), ((3401, 3455), 'matplotlib.pyplot.plot', 'plt.plot', (['query_num', 'E_out_2', '"""k"""'], {'label': '"""random Eout"""'}), "(query_num, E_out_2, 'k', label='random Eout')\n", (3409, 3455), True, 'from matplotlib import pyplot as plt\n'), ((3460, 3491), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Queries"""'], {}), "('Number of Queries')\n", (3470, 3491), True, 'from matplotlib import pyplot as plt\n'), ((3496, 3515), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (3506, 3515), True, 'from matplotlib import pyplot as plt\n'), ((3520, 3550), 'matplotlib.pyplot.title', 'plt.title', (['"""Experiment Result"""'], {}), "('Experiment Result')\n", (3529, 3550), True, 'from matplotlib import pyplot as plt\n'), ((3555, 3654), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.05)', 'fancybox': '(True)', 'shadow': '(True)', 'ncol': '(5)'}), "(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True,\n shadow=True, ncol=5)\n", (3565, 3654), True, 'from matplotlib import pyplot as plt\n'), ((3670, 3680), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3678, 3680), True, 'from matplotlib import pyplot as plt\n'), ((1316, 1349), 'numpy.load', 'np.load', (['"""data/phrases_50dim.npy"""'], {}), "('data/phrases_50dim.npy')\n", (1323, 1349), True, 'import numpy as np\n'), ((1365, 1401), 'numpy.load', 'np.load', (['"""data/sentiments_50dim.npy"""'], {}), "('data/sentiments_50dim.npy')\n", (1372, 1401), True, 'import numpy as np\n'), ((1925, 1951), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1941, 1951), False, 'import os\n')] |
import numpy as np
import xarray as xr
# from concurrent.futures import ProcessPoolExecutor
# pool = ProcessPoolExecutor()
from functools import partial
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
double_array = partial(np.asarray, dtype='f8')
def gen_sq_mean(sq):
    """Coerce a sub-quad's vertices to float64 and compute its centroid.

    Args:
        sq: sequence of vertex coordinates, shape (n_verts, n_dims).

    Returns:
        (arr, ctr): the float64 vertex array and its per-column mean.
    """
    # Inline the module-level ``double_array`` partial and use the string
    # subscript form of einsum; both are equivalent to the original
    # interleaved-operand call ('i...->...' sums over axis 0) but are
    # self-contained and easier to read.
    arr = np.asarray(sq, dtype='f8')
    ctr = np.einsum('i...->...', arr) / float(arr.shape[0])
    return arr, ctr
def gen_split_events(chopped_polys, poly_areas, slicer, event_ids=None):
    """Yield one record per sub-polygon produced by slicing.

    chopped_polys is a list of N polygons whose elements contain the
    sub-polys of each polygon (the structure built by
    QuadMeshPolySlicer.slice); poly_areas holds the N total polygon areas
    and event_ids the N matching ids (faked as 0..N-1 when not given).

    Yields tuples of
    (sub_poly_verts, sub_poly_centroid, event_frac_area, mesh_frac_area,
     (x_idx, y_idx), event_id).
    """
    if event_ids is None:
        log.debug("Faking event ids")
        event_ids = range(len(chopped_polys))
    per_poly = zip(chopped_polys, poly_areas, event_ids)
    for (subquads, frac_areas, (x_idxs, y_idxs)), total_area, evid in per_poly:
        mesh_fracs = slicer.quad_frac_from_poly_frac_area(
            frac_areas, total_area, x_idxs, y_idxs)
        # gen_sq_mean (the dominant cost here) is mapped lazily; each
        # (array, centroid) pair is consumed exactly once by the zip
        # below, so nothing needs to be materialized up front.
        quads_with_ctrs = map(gen_sq_mean, subquads)
        per_quad = zip(quads_with_ctrs, frac_areas, x_idxs, y_idxs, mesh_fracs)
        for (quad, quad_ctr), ev_frac, xi, yi, mesh_frac in per_quad:
            yield quad, quad_ctr, ev_frac, mesh_frac, (xi, yi), evid
def split_event_data(split_polys, poly_areas, slicer, event_ids):
    """Split polygons into per-mesh-cell pieces and tabulate their properties.

    split_polys is a list of N original polygons whose elements contain
    the sub-polys of each polygon (the structure built by
    QuadMeshPolySlicer.slice); event_ids are the N corresponding ids.

    Returns:
        (sub_polys, props): sub_polys is a list of (n_verts, 2) vertex
        arrays, one per sub-polygon; props is a structured array holding
        the matching centroid, area fractions, mesh index and parent id.
    """
    record_dtype = np.dtype([
        # Sub-polys may have any number of vertices, so the vertex arrays
        # are returned separately instead of being packed into the records.
        ('poly_ctr', 'f8', (2,)),
        ('event_frac_area', 'f8'),
        ('mesh_frac_area', 'f8'),
        ('mesh_idx', 'i8', (2,)),
        ('event_id', 'u8')
    ])
    pieces = list(gen_split_events(split_polys, poly_areas, slicer,
                                   event_ids=event_ids))
    sub_polys = [piece[0] for piece in pieces]
    # fromiter would run faster if the count could be precomputed without
    # iterating, but len(sub_polys) is already available here.
    props = np.fromiter((piece[1:] for piece in pieces),
                        dtype=record_dtype, count=len(sub_polys))
    return sub_polys, props
def split_event_dataset_from_props(props, centroid_names=('split_event_lon',
                                                          'split_event_lat')):
    """Build an xarray Dataset of split-event properties.

    props is the structured numpy array returned by split_event_data.
    """
    dims = ('number_of_split_event_children',)
    lon_name, lat_name = centroid_names
    columns = {
        lon_name: props['poly_ctr'][:, 0],
        lat_name: props['poly_ctr'][:, 1],
        'split_event_mesh_area_fraction': props['mesh_frac_area'],
        'split_event_area_fraction': props['event_frac_area'],
        'split_event_mesh_x_idx': props['mesh_idx'][:, 0],
        'split_event_mesh_y_idx': props['mesh_idx'][:, 1],
        'split_event_parent_event_id': props['event_id'],
    }
    # Every variable shares the single split-child dimension.
    spec = {name: {'dims': dims, 'data': data}
            for name, data in columns.items()}
    return xr.Dataset.from_dict(spec)
def split_group_dataset_from_props(props, centroid_names=('split_group_lon',
                                                          'split_group_lat')):
    """Build an xarray Dataset of split-group properties.

    props is the structured numpy array returned by split_event_data.
    """
    dims = ('number_of_split_group_children',)
    lon_name, lat_name = centroid_names
    columns = {
        lon_name: props['poly_ctr'][:, 0],
        lat_name: props['poly_ctr'][:, 1],
        'split_group_mesh_area_fraction': props['mesh_frac_area'],
        'split_group_area_fraction': props['event_frac_area'],
        'split_group_mesh_x_idx': props['mesh_idx'][:, 0],
        'split_group_mesh_y_idx': props['mesh_idx'][:, 1],
        'split_group_parent_group_id': props['event_id'],
    }
    # Every variable shares the single split-child dimension.
    spec = {name: {'dims': dims, 'data': data}
            for name, data in columns.items()}
    return xr.Dataset.from_dict(spec)
def split_flash_dataset_from_props(props, centroid_names=('split_flash_lon',
                                                          'split_flash_lat')):
    """Build an xarray Dataset of split-flash properties.

    props is the structured numpy array returned by split_event_data.
    """
    dims = ('number_of_split_flash_children',)
    lon_name, lat_name = centroid_names
    columns = {
        lon_name: props['poly_ctr'][:, 0],
        lat_name: props['poly_ctr'][:, 1],
        'split_flash_mesh_area_fraction': props['mesh_frac_area'],
        'split_flash_area_fraction': props['event_frac_area'],
        'split_flash_mesh_x_idx': props['mesh_idx'][:, 0],
        'split_flash_mesh_y_idx': props['mesh_idx'][:, 1],
        'split_flash_parent_flash_id': props['event_id'],
    }
    # Every variable shares the single split-child dimension.
    spec = {name: {'dims': dims, 'data': data}
            for name, data in columns.items()}
    return xr.Dataset.from_dict(spec)
def replicate_and_weight_split_child_dataset(glm, split_child_dataset,
        parent_id='event_id', split_child_parent_id='split_event_parent_event_id',
        names=None, weights=None):
    """
    Create a child level of the hierarchy that corresponds properties of its
    parent that have been geometrically split. Apply fractional weights.

    The default args/kwargs show how to split the GLM event dataset into a set
    of sub-event children. This function can also be used to split flashes
    given a set of flash-level polygons. Those polygons are assumed to have no
    overlap - i.e., the constituent events from that flash have been unioned
    and then split. In this way, we divide up flash-level properties without
    regard to the values of the lower-level children.

    Args:
        glm: object exposing reduce_to_entities(parent_id, ids).
        split_child_dataset: dataset holding the split-child variables;
            modified in place and also returned.
        parent_id: name of the parent id variable in glm.
        split_child_parent_id: name of the replicated parent-id variable
            in split_child_dataset.
        names: parent variable names to replicate (default: the four GLM
            event-level variables below).
        weights: mapping of variable name -> split-fraction variable used
            to scale it (default: weight event_energy by area fraction).
    """
    # Immutable None sentinels replace the previous mutable list/dict
    # defaults (a classic Python pitfall); passing nothing reproduces the
    # old defaults exactly.
    if names is None:
        names = ('event_energy', 'event_time_offset',
                 'event_parent_flash_id', 'event_parent_group_id')
    if weights is None:
        weights = {'event_energy': 'split_event_area_fraction'}
    split_dims = getattr(split_child_dataset, split_child_parent_id).dims
    replicated_event_ids = getattr(split_child_dataset, split_child_parent_id)
    # it is important that this step keep the events in the same order
    # and retain the replication.
    glm_data = glm.reduce_to_entities(parent_id, replicated_event_ids)
    for name in names:
        new_name = 'split_' + name
        new_var = getattr(glm_data, name).data
        if name in weights:
            weight_var = getattr(split_child_dataset, weights[name])
            # dimension names won't match, but lengths should.
            new_var = new_var*weight_var.data  # ensures a copy, not a view
        # add the (possibly weighted) variable to the dataset in place
        split_child_dataset[new_name] = (split_dims, new_var)
    return split_child_dataset
| [
"functools.partial",
"numpy.dtype",
"numpy.einsum",
"xarray.Dataset.from_dict",
"logging.NullHandler",
"numpy.fromiter",
"logging.getLogger"
] | [((176, 203), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (193, 203), False, 'import logging\n'), ((258, 289), 'functools.partial', 'partial', (['np.asarray'], {'dtype': '"""f8"""'}), "(np.asarray, dtype='f8')\n", (265, 289), False, 'from functools import partial\n'), ((219, 240), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (238, 240), False, 'import logging\n'), ((2281, 2421), 'numpy.dtype', 'np.dtype', (["[('poly_ctr', 'f8', (2,)), ('event_frac_area', 'f8'), ('mesh_frac_area',\n 'f8'), ('mesh_idx', 'i8', (2,)), ('event_id', 'u8')]"], {}), "([('poly_ctr', 'f8', (2,)), ('event_frac_area', 'f8'), (\n 'mesh_frac_area', 'f8'), ('mesh_idx', 'i8', (2,)), ('event_id', 'u8')])\n", (2289, 2421), True, 'import numpy as np\n'), ((3235, 3305), 'numpy.fromiter', 'np.fromiter', (['split_event_property_iter'], {'dtype': 'dtype', 'count': 'n_sub_polys'}), '(split_event_property_iter, dtype=dtype, count=n_sub_polys)\n', (3246, 3305), True, 'import numpy as np\n'), ((4212, 4235), 'xarray.Dataset.from_dict', 'xr.Dataset.from_dict', (['d'], {}), '(d)\n', (4232, 4235), True, 'import xarray as xr\n'), ((5117, 5140), 'xarray.Dataset.from_dict', 'xr.Dataset.from_dict', (['d'], {}), '(d)\n', (5137, 5140), True, 'import xarray as xr\n'), ((6022, 6045), 'xarray.Dataset.from_dict', 'xr.Dataset.from_dict', (['d'], {}), '(d)\n', (6042, 6045), True, 'import xarray as xr\n'), ((348, 389), 'numpy.einsum', 'np.einsum', (['sqa', '[0, Ellipsis]', '[Ellipsis]'], {}), '(sqa, [0, Ellipsis], [Ellipsis])\n', (357, 389), True, 'import numpy as np\n')] |
"""
This modules defines the board of the game based on the configuration the user has requested
"""
import numpy as np
import pawn
import math
##DIRECTIONS##
NORTHWEST = "northwest"
NORTHEAST = "northeast"
SOUTHWEST = "southwest"
SOUTHEAST = "southeast"
# Constant for Obstacle in the game, 21 because max pawn_id in our game is 20
OBSTACLE = 21
class Board:
    """Checkers-style game board.

    The board is a square numpy float array: 0 marks an empty square,
    positive ids are player-1 pawns, negative ids are player-2 pawns and
    OBSTACLE (21) marks an impassable square. Pawn objects live in the
    p1_pawns / p2_pawns dicts keyed by (signed) integer id; dict lookups
    with values read back from the float board work because equal Python
    ints and floats hash identically.
    """

    # Initialize the board based on the config the user requested
    def __init__(self, numOfSquares=8, num_of_pawns=12):
        self.board = np.zeros((numOfSquares, numOfSquares))
        self.p1_pawns = {}
        self.p2_pawns = {}
        self.num_of_pawns = num_of_pawns
        # Board size dictates the pawn count for the 10x10 and 6x6 variants.
        if numOfSquares == 10:
            self.num_of_pawns = 20
        elif numOfSquares == 6:
            self.num_of_pawns = 6
        num_of_rows = self.num_of_pawns / (numOfSquares / 2)
        self.initialize_players(0, 1, self.num_of_pawns)
        # Player 2 starts on the opposite edge; the starting column parity
        # depends on the board configuration.
        if numOfSquares == 8 and num_of_pawns != 6:
            self.initialize_players(int(numOfSquares - num_of_rows), 0, self.num_of_pawns, False)
        else:
            self.initialize_players(int(numOfSquares - num_of_rows), 1, self.num_of_pawns, False)
        self.total_moves = 0
        self.moves_since_last_capture = 0

    # Initialize player pawns and populate the board with their positions
    def initialize_players(self, start_row, start_index, num_of_pawns, p1=True):
        """Place num_of_pawns pawns for one player, starting at start_row and
        alternating the starting column (start_index) on each row so pawns sit
        on dark squares only. Player-2 pawns get negative ids."""
        rows, cols = self.board.shape
        num_rows_to_fill = math.ceil(num_of_pawns / (cols / 2))
        pawn_id = 1
        for row in range(start_row, start_row + num_rows_to_fill):
            for col in range(start_index, cols, 2):
                if pawn_id > num_of_pawns:
                    break
                if p1:
                    self.board[row, col] = pawn_id
                    self.p1_pawns[pawn_id] = pawn.Pawn(pawn_id, row, col, start_row)
                else:
                    self.board[row, col] = -pawn_id
                    self.p2_pawns[-pawn_id] = pawn.Pawn(-pawn_id, row, col, start_row)
                pawn_id += 1
            # Flip the column parity for the next row.
            if start_index == 0:
                start_index = 1
            else:
                start_index = 0

    # Updates the board and the pawn's stored coordinates for a move to
    # (new_x, new_y). NOTE: this method was originally defined twice with
    # different bodies; only the later definition (kept here) ever took
    # effect at class-creation time, so the dead duplicate was removed.
    def update_board_pawn(self, new_x, new_y, pawn, p1=True):
        self.board[new_x, new_y] = pawn.id
        self.board[pawn.coordinates] = 0
        if p1:
            self.p1_pawns[pawn.id].coordinates = (new_x, new_y)
        else:
            self.p2_pawns[pawn.id].coordinates = (new_x, new_y)

    # Returns all the pawns available to move for a given player
    def get_available_pawns(self, player_pawns_list, p2_list, dir):
        """Return the ids of pawns in player_pawns_list that can make at least
        one legal move: a step into an empty square or a jump over a rival
        pawn (from p2_list) into an empty square. dir is a pair of forward
        directions for non-king pawns; kings are checked in all directions."""
        temp_dict = player_pawns_list
        player_available_pawns = []
        for p in temp_dict:
            if not temp_dict[int(p)].is_king:
                x, y = self.get_new_coordinates(dir[0], temp_dict[p])
                a, b = self.get_new_coordinates(dir[1], temp_dict[p])
                if self.check_boundry(x, y) and p not in player_available_pawns:
                    if self.board[x][y] == 0:
                        player_available_pawns.append(p)
                    elif self.board[x][y] in p2_list:
                        # A rival pawn is adjacent; check whether the landing
                        # square beyond it is free.
                        x1, y1 = self.get_new_coordinates(dir[0], p2_list[self.board[x][y]])
                        if self.check_boundry(x1, y1) and self.board[x1, y1] == 0:
                            player_available_pawns.append(p)
                if self.check_boundry(a, b) and p not in player_available_pawns:
                    if self.board[a][b] == 0:
                        player_available_pawns.append(p)
                    elif self.board[a][b] in p2_list:
                        a1, b1 = self.get_new_coordinates(dir[1], p2_list[self.board[a][b]])
                        if self.check_boundry(a1, b1) and self.board[a1, b1] == 0:
                            player_available_pawns.append(p)
            else:
                # Kings move in all four directions.
                temp_list = self.get_kings_move(temp_dict[p])
                if len(temp_list) > 0:
                    player_available_pawns.append(p)
        return player_available_pawns

    # Checks if given point (x, y) is within the board
    def check_boundry(self, x, y):
        rows, cols = self.board.shape
        if (0 <= x < rows) and (0 <= y < cols):
            return True
        else:
            return False

    # This method is used to search for all the movable pawns of the players
    def check_available_pawns_to_move(self, p1=False):
        """
        :param p1 boolean
        :return array array of pawns that can move forward/backward
        Available pawns to move
        """
        # Player 1 moves "south" (increasing row), player 2 moves "north".
        if p1 == True:
            return self.get_available_pawns(self.p1_pawns, self.p2_pawns, [SOUTHWEST, SOUTHEAST])
        else:
            return self.get_available_pawns(self.p2_pawns, self.p1_pawns, [NORTHWEST, NORTHEAST])

    # Given direction, return the corresponding coordinates
    def get_new_coordinates(self, dir, pawn):
        """
        Returns the coordinates one square in direction `dir` from the
        pawn's current position.
        """
        x, y = (pawn.coordinates)
        if dir == NORTHWEST:
            return x - 1, y - 1
        elif dir == SOUTHWEST:
            return x + 1, y - 1
        elif dir == NORTHEAST:
            return x - 1, y + 1
        elif dir == SOUTHEAST:
            return x + 1, y + 1
        else:
            return 0

    # Given pawn, return all the coordinates the pawn can move to for player 1
    def get_player1_moves(self, dir1, dir2, pawn):
        get_pawn_moves = []
        sw_x, sw_y = self.get_new_coordinates(dir1, pawn)
        se_x, se_y = self.get_new_coordinates(dir2, pawn)
        # Jump moves: an adjacent rival (negative id) with an empty square beyond.
        if self.check_boundry(sw_x, sw_y) and self.board[sw_x][sw_y] < 0 and self.board[sw_x][sw_y] != OBSTACLE:
            sw_sw_x, sw_sw_y = self.get_new_coordinates(dir1, self.p2_pawns[self.board[sw_x][sw_y]])
            if self.check_boundry(sw_sw_x, sw_sw_y) and self.board[sw_sw_x][sw_sw_y] == 0:
                get_pawn_moves.append((sw_sw_x, sw_sw_y))
        # BUG FIX: this branch previously tested the SOUTHWEST square's
        # obstacle status instead of the southeast square's.
        if self.check_boundry(se_x, se_y) and self.board[se_x][se_y] < 0 and self.board[se_x][se_y] != OBSTACLE:
            se_se_x, se_se_y = self.get_new_coordinates(dir2, self.p2_pawns[self.board[se_x][se_y]])
            if self.check_boundry(se_se_x, se_se_y) and self.board[se_se_x][se_se_y] == 0:
                get_pawn_moves.append((se_se_x, se_se_y))
        # Simple diagonal steps into empty squares.
        if self.check_boundry(sw_x, sw_y) and self.board[sw_x][sw_y] == 0:
            get_pawn_moves.append((sw_x, sw_y))
        if self.check_boundry(se_x, se_y) and self.board[se_x][se_y] == 0:
            get_pawn_moves.append((se_x, se_y))
        return get_pawn_moves

    # Given pawn, return all the coordinates the pawn can move to for player 2
    def get_player2_moves(self, dir1, dir2, pawn):
        get_pawn_moves = []
        nw_x, nw_y = self.get_new_coordinates(dir1, pawn)
        ne_x, ne_y = self.get_new_coordinates(dir2, pawn)
        # Jump moves: an adjacent rival (positive id, excluding OBSTACLE)
        # with an empty square beyond.
        if self.check_boundry(nw_x, nw_y) and self.board[nw_x][nw_y] > 0 and self.board[nw_x][nw_y] != OBSTACLE:
            nw_nw_x, nw_nw_y = self.get_new_coordinates(dir1, self.p1_pawns[self.board[nw_x][nw_y]])
            if self.check_boundry(nw_nw_x, nw_nw_y) and self.board[nw_nw_x][nw_nw_y] == 0:
                get_pawn_moves.append((nw_nw_x, nw_nw_y))
        if self.check_boundry(ne_x, ne_y) and self.board[ne_x][ne_y] > 0 and self.board[ne_x][ne_y] != OBSTACLE:
            ne_ne_x, ne_ne_y = self.get_new_coordinates(dir2, self.p1_pawns[self.board[ne_x][ne_y]])
            if self.check_boundry(ne_ne_x, ne_ne_y) and self.board[ne_ne_x][ne_ne_y] == 0:
                get_pawn_moves.append((ne_ne_x, ne_ne_y))
        # Simple diagonal steps into empty squares.
        if self.check_boundry(nw_x, nw_y) and self.board[nw_x][nw_y] == 0:
            get_pawn_moves.append((nw_x, nw_y))
        if self.check_boundry(ne_x, ne_y) and self.board[ne_x][ne_y] == 0:
            get_pawn_moves.append((ne_x, ne_y))
        return get_pawn_moves

    # This method is used to check the possible coordinates that the pawn can move to
    def get_moves(self, pawn):
        """
        :param pawn Pawn object
        :return array array of coordinates the pawn can move to
        Returns a list of legal move locations from a set of coordinates (x,y) on the board.
        If that location is empty, then get_moves() returns an empty list.
        """
        x, y = (pawn.coordinates)
        pawn_id = self.board[x][y]
        if pawn_id != 0:
            if pawn_id < 0 and pawn.is_king is False:
                get_pawn_moves = self.get_player2_moves(NORTHWEST, NORTHEAST, pawn)
            elif pawn_id > 0 and pawn.is_king is False:
                get_pawn_moves = self.get_player1_moves(SOUTHWEST, SOUTHEAST, pawn)
            else:
                get_pawn_moves = self.get_kings_move(pawn)
        else:
            get_pawn_moves = []
        return get_pawn_moves

    # Given a King pawn, get all the possible coordinates that the pawn can move to
    def get_kings_move(self, pawn):
        x, y = (pawn.coordinates)
        get_pawn_moves = []
        pawn_id = self.board[x][y]
        if pawn_id != 0:
            # Kings combine both players' move generators to cover all
            # four diagonal directions.
            if pawn_id < 0:
                get_pawn_moves.extend(self.get_player2_moves(NORTHWEST, NORTHEAST, pawn))
                get_pawn_moves.extend(self.get_player2_moves(SOUTHWEST, SOUTHEAST, pawn))
            elif pawn_id > 0:
                get_pawn_moves.extend(self.get_player1_moves(NORTHWEST, NORTHEAST, pawn))
                get_pawn_moves.extend(self.get_player1_moves(SOUTHWEST, SOUTHEAST, pawn))
        return get_pawn_moves

    # This method is used to analyze the move when the pawn is selected
    def check_move_type(self, pawn, direction):
        """
        :param pawn Pawn object
        :return int 0 for simple move, 1 capturing move and -1 if the move cannot be made
        """
        new_x, new_y = self.get_new_coordinates(direction, pawn)
        new_id = self.board[new_x, new_y]
        if new_id == 0:
            return 0
        elif new_id == OBSTACLE:
            # BUG FIX: previously an obstacle (21 > 0) looked like a
            # capturable enemy to player 2; it can never be moved onto.
            return -1
        elif new_id > 0 and pawn.id > 0 or new_id < 0 and pawn.id < 0:
            return -1
        else:
            return 1

    # This method controls the pawn's movement
    def move_pawn(self, pawn, coordinate):
        """
        This method handles the pawn movement inside the board
        :param pawn Pawn object
        Changes the position of the pawn selected and state of board
        :return list non-empty if the move was a capture that can be chained
        """
        direction = self.get_direction_from_coordinates(pawn, coordinate)
        self.total_moves += 1
        self.moves_since_last_capture += 1
        chain_capture_coordinates = []
        if self.check_move_type(pawn, direction) == 0:
            self.simple_move(pawn, direction)
        elif self.check_move_type(pawn, direction) == 1:
            self.move_capture_pawn(pawn, direction)
            # Check if the move can be chained by another capture
            chain_capture_coordinates = self.get_chain_capture_coordinates(pawn)
        # Promote to king when the pawn reaches the far edge.
        if (pawn.id > 0 and pawn.coordinates[0] == self.board.shape[0] - 1) or (
                pawn.id < 0 and pawn.coordinates[0] == 0):
            pawn.is_king = True
        return chain_capture_coordinates

    def get_chain_capture_coordinates(self, pawn):
        """Return the landing coordinates of every further capture the pawn
        could make from its current square (used for multi-jump chains)."""
        chain_capture_coordinates = []
        moves_list = self.get_moves(pawn)
        for coordinate in moves_list:
            move_type = self.check_move_type(pawn, self.get_direction_from_coordinates(pawn, coordinate))
            if move_type == 1:
                chain_capture_coordinates.append(coordinate)
        return chain_capture_coordinates

    # This method is used when the move type is a capturing move
    def move_capture_pawn(self, pawn, direction):
        """
        :param pawn Pawn object
        Removes the jumped rival pawn and lands the moving pawn beyond it.
        """
        pawn_id = pawn.id
        pawn_coordinates = pawn.coordinates
        rival_pawn_coordinates = self.get_new_coordinates(direction, pawn)
        rival_pawn = self.p1_pawns[self.board[rival_pawn_coordinates]] if self.board[rival_pawn_coordinates] > 0 else \
            self.p2_pawns[self.board[rival_pawn_coordinates]]
        new_x, new_y = self.get_new_coordinates(direction, rival_pawn)
        self.remove_pawn(rival_pawn, rival_pawn.id > 0)
        self.update_board_pawn(new_x, new_y, pawn, pawn.id > 0)

    # This method is used when the move type is simple: move the pawn diagonally,
    # updating the board state and the pawn's coordinates
    def simple_move(self, pawn, direction):
        """
        :param pawn Pawn object
        """
        new_x, new_y = self.get_new_coordinates(direction, pawn)
        self.update_board_pawn(new_x, new_y, pawn, pawn.id > 0)

    # This method removes a pawn from the owning player's dictionary and
    # clears its square; also resets the no-capture move counter
    def remove_pawn(self, pawn, p1=True):
        self.moves_since_last_capture = 0
        pawn_id = pawn.id
        pawn_coordinates = pawn.coordinates
        if p1:
            self.p1_pawns.pop(pawn_id, None)
        else:
            self.p2_pawns.pop(pawn_id, None)
        self.board[pawn_coordinates] = 0

    # This method checks if the game is over or not.
    def check_game_status(self):
        """
        This method checks the status of the game
        Returns true if the game is over and false if the game is still active in progress
        """
        # 40 moves without a capture is treated as a draw condition.
        if self.moves_since_last_capture > 40 or len(self.p1_pawns) == 0 or len(self.p2_pawns) == 0:
            return True
        return False

    # This method is used to declare the winner
    def declare_winner(self):
        """
        This method declares the winner of the game
        Returns 1 | 0 | -1, 1 if player1 is the winner, -1 if player2 is the winner and 0 if its a tie
        """
        if len(self.p1_pawns) == 0:
            return -1
        elif len(self.p2_pawns) == 0:
            return 1
        else:
            # NOTE(review): an equal pawn count falls through to -1 here,
            # so a literal tie (0) is never returned — confirm intent.
            return 1 if len(self.p1_pawns) > len(self.p2_pawns) else -1

    # This method gives the direction from the given pawn and new coordinate
    def get_direction_from_coordinates(self, pawn, new_coordinate):
        x, y = (pawn.coordinates)
        new_x, new_y = new_coordinate
        if x > new_x and y > new_y:
            return NORTHWEST
        elif x < new_x and y > new_y:
            return SOUTHWEST
        elif x > new_x and y < new_y:
            return NORTHEAST
        elif x < new_x and y < new_y:
            return SOUTHEAST

    # Returns the number of kings in the given pawn dict
    def total_kings(self, pawns):
        count = 0
        for pawn in pawns.values():
            if pawn.is_king:
                count += 1
        return count

    # Evaluate score (simpler version): pawn difference plus half a point per king
    def game_score(self):
        return len(self.p1_pawns) - len(self.p2_pawns) + \
               (self.total_kings(self.p1_pawns) * 0.5 - self.total_kings(self.p2_pawns) * 0.5)

    # Computes the score of the state according to pawn coordinate position and is_king status
    def compute_score(self):
        """Positional evaluation: +/-10 per king, otherwise a row-dependent
        weight per pawn, normalized by the total pawn count.

        NOTE(review): the two branches differ only in the i<4 weights
        (5/7 vs 7/5) — presumably an intentional turn-based asymmetry,
        but confirm; the board is indexed with self.board[0].size as the
        row count, which is only valid because the board is square.
        """
        score = 0
        # if player1's turn
        if self.total_moves % 2 == 0:
            for i in range(self.board[0].size):
                for j in range(self.board[0].size):
                    pawn = self.board[i][j]
                    if pawn in self.p1_pawns.keys() or pawn in self.p2_pawns.keys():
                        if pawn in self.p1_pawns.keys() and self.p1_pawns[pawn].is_king:
                            score += 10
                        elif pawn in self.p2_pawns.keys() and self.p2_pawns[pawn].is_king:
                            score -= 10
                        elif pawn in self.p1_pawns.keys() and i < 4:
                            score += 5
                        elif pawn in self.p2_pawns.keys() and i < 4:
                            score -= 7
                        elif pawn in self.p1_pawns.keys() and i >= 4:
                            score += 7
                        elif pawn in self.p2_pawns.keys() and i >= 4:
                            score -= 5
        # if player2's turn
        else:
            for i in range(self.board[0].size):
                for j in range(self.board[0].size):
                    pawn = self.board[i][j]
                    if pawn in self.p1_pawns.keys() or pawn in self.p2_pawns.keys():
                        if pawn in self.p1_pawns.keys() and self.p1_pawns[pawn].is_king:
                            score += 10
                        elif pawn in self.p2_pawns.keys() and self.p2_pawns[pawn].is_king:
                            score -= 10
                        elif pawn in self.p1_pawns.keys() and i < 4:
                            score += 7
                        elif pawn in self.p2_pawns.keys() and i < 4:
                            score -= 5
                        elif pawn in self.p1_pawns.keys() and i >= 4:
                            score += 7
                        elif pawn in self.p2_pawns.keys() and i >= 4:
                            score -= 5
        return score / (len(self.p1_pawns) + (len(self.p2_pawns)))

    # This method adds obstacles to the board on random empty squares
    def set_obstacles(self, num_of_obstacles=0):
        obstacles = []
        rows = self.board.shape[0]
        while num_of_obstacles > 0:
            x = np.random.randint(rows)
            y = np.random.randint(rows)
            # Only place on empty squares; occupied picks are retried.
            if self.board[x, y] == 0:
                self.board[x, y] = OBSTACLE
                obstacles.append((x, y))
                num_of_obstacles -= 1
        return obstacles

    # String representation of the Board object
    def __str__(self):
        return f"Board: \n{self.board}\n"
if __name__ == "__main__":
board = Board()
obs = board.set_obstacles(3)
print(board)
print(obs)
| [
"pawn.Pawn",
"numpy.random.randint",
"numpy.zeros",
"math.ceil"
] | [((510, 548), 'numpy.zeros', 'np.zeros', (['(numOfSquares, numOfSquares)'], {}), '((numOfSquares, numOfSquares))\n', (518, 548), True, 'import numpy as np\n'), ((1448, 1484), 'math.ceil', 'math.ceil', (['(num_of_pawns / (cols / 2))'], {}), '(num_of_pawns / (cols / 2))\n', (1457, 1484), False, 'import math\n'), ((17954, 17977), 'numpy.random.randint', 'np.random.randint', (['rows'], {}), '(rows)\n', (17971, 17977), True, 'import numpy as np\n'), ((17994, 18017), 'numpy.random.randint', 'np.random.randint', (['rows'], {}), '(rows)\n', (18011, 18017), True, 'import numpy as np\n'), ((1814, 1853), 'pawn.Pawn', 'pawn.Pawn', (['pawn_id', 'row', 'col', 'start_row'], {}), '(pawn_id, row, col, start_row)\n', (1823, 1853), False, 'import pawn\n'), ((2007, 2047), 'pawn.Pawn', 'pawn.Pawn', (['(-pawn_id)', 'row', 'col', 'start_row'], {}), '(-pawn_id, row, col, start_row)\n', (2016, 2047), False, 'import pawn\n')] |
from pathlib import Path
import numpy as np
import joblib
from keras.preprocessing import image
from keras.applications import vgg16
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# Path to folders with training data
dog_path = Path("img") / "dogs"
not_dog_path = Path("img") / "not_dogs"


def _load_training_images(folder, label, images, labels):
    """Load every .png in `folder` and append its pixel array to `images`
    with the matching class `label` appended to `labels` (kept parallel)."""
    for img_file in folder.glob("*.png"):
        # Load the image from disk and convert it to a numpy array
        img = image.load_img(img_file)
        images.append(image.img_to_array(img))
        labels.append(label)


images = []
labels = []

# Label convention: 0 = "not dog", 1 = "dog".
# (The two loading passes were previously duplicated inline.)
_load_training_images(not_dog_path, 0, images, labels)
_load_training_images(dog_path, 1, images, labels)

# Create a single numpy array with all the images we loaded
# (assumes every training .png is already 64x64 RGB -- TODO confirm)
x_train = np.array(images)

# Also convert the labels to a numpy array
y_train = np.array(labels)

# Preprocess the pixel data the way VGG16 expects
# (NOTE: this is mean subtraction / channel reordering, not 0-to-1 scaling)
x_train = vgg16.preprocess_input(x_train)

# Load a pre-trained neural network to use as a feature extractor
feature_extraction_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(64, 64, 3))

# Extract features for each image (all in one pass)
features_x = feature_extraction_model.predict(x_train)

# Create the classification head and add layers
model = Sequential()
model.add(Flatten(input_shape=features_x.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(
    loss="binary_crossentropy",
    optimizer="adam",
    metrics=['accuracy']
)

# Train the model
model.fit(
    features_x,
    y_train,
    epochs=10,
    shuffle=True
)

# Load an image file to test, resizing it to 64x64 pixels (as required by this model)
img = image.load_img("img/not_dog.png", target_size=(64, 64))
# img = image.load_img("img/dog.png", target_size=(64, 64))

# Convert the image to a numpy array
image_array = image.img_to_array(img)

# Add a fourth dimension (Keras expects a batch of images, not a single image)
images = np.expand_dims(image_array, axis=0)

# Preprocess the test image exactly like the training data
images = vgg16.preprocess_input(images)

# Use the pre-trained neural network to extract features from our test image (the same way we did to train the model)
features = feature_extraction_model.predict(images)

# Given the extracted features, make a final prediction using our own model
results = model.predict(features)

# Since we are only testing one image with possible class, we only need to check the first result's first element
single_result = results[0][0]

# Print the result
print("Likelihood that this image contains a dog: {}%".format(int(single_result * 100))) | [
"keras.models.Sequential",
"keras.layers.Dropout",
"keras.layers.Flatten",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"pathlib.Path",
"numpy.array",
"keras.layers.Dense",
"keras.applications.vgg16.VGG16",
"keras.applications.vgg16.preproc... | [((1161, 1177), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1169, 1177), True, 'import numpy as np\n'), ((1232, 1248), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1240, 1248), True, 'import numpy as np\n'), ((1299, 1330), 'keras.applications.vgg16.preprocess_input', 'vgg16.preprocess_input', (['x_train'], {}), '(x_train)\n', (1321, 1330), False, 'from keras.applications import vgg16\n'), ((1425, 1500), 'keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(64, 64, 3)'}), "(weights='imagenet', include_top=False, input_shape=(64, 64, 3))\n", (1436, 1500), False, 'from keras.applications import vgg16\n'), ((1650, 1662), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1660, 1662), False, 'from keras.models import Sequential\n'), ((2127, 2182), 'keras.preprocessing.image.load_img', 'image.load_img', (['"""img/not_dog.png"""'], {'target_size': '(64, 64)'}), "('img/not_dog.png', target_size=(64, 64))\n", (2141, 2182), False, 'from keras.preprocessing import image\n'), ((2295, 2318), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2313, 2318), False, 'from keras.preprocessing import image\n'), ((2426, 2461), 'numpy.expand_dims', 'np.expand_dims', (['image_array'], {'axis': '(0)'}), '(image_array, axis=0)\n', (2440, 2461), True, 'import numpy as np\n'), ((2493, 2523), 'keras.applications.vgg16.preprocess_input', 'vgg16.preprocess_input', (['images'], {}), '(images)\n', (2515, 2523), False, 'from keras.applications import vgg16\n'), ((268, 279), 'pathlib.Path', 'Path', (['"""img"""'], {}), "('img')\n", (272, 279), False, 'from pathlib import Path\n'), ((304, 315), 'pathlib.Path', 'Path', (['"""img"""'], {}), "('img')\n", (308, 315), False, 'from pathlib import Path\n'), ((465, 484), 'keras.preprocessing.image.load_img', 'image.load_img', (['img'], {}), '(img)\n', (479, 484), 
False, 'from keras.preprocessing import image\n'), ((545, 568), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (563, 568), False, 'from keras.preprocessing import image\n'), ((831, 850), 'keras.preprocessing.image.load_img', 'image.load_img', (['img'], {}), '(img)\n', (845, 850), False, 'from keras.preprocessing import image\n'), ((911, 934), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (929, 934), False, 'from keras.preprocessing import image\n'), ((1674, 1715), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': 'features_x.shape[1:]'}), '(input_shape=features_x.shape[1:])\n', (1681, 1715), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1727, 1756), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1732, 1756), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1768, 1780), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1775, 1780), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1792, 1822), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1797, 1822), False, 'from keras.layers import Dense, Dropout, Flatten\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 7 18:44:59 2017
@author: pramos
Usufull functions for UVW velocity-space treatments
"""
import numpy as np
#constants (all angles stored in radians)
incl=np.deg2rad(62.87124882) #inclination of the galactic plane w.r.t. the celestial equator
alom=np.deg2rad(282.8594813) #RA of the equatorial (ascending) node
lom=np.deg2rad(32.93680516) #galactic longitude of the ascending node of the galactic equator
deo=np.deg2rad(27.12825111) #declination of the North Galactic Pole (NGP)
alpm=np.deg2rad(192.8594813) #RA of the NGP
theta=np.deg2rad(122.93680516) #galactic longitude of the North Celestial Pole
const=4.7404705 #km*yr/s conversion factor: pm [arcsec/yr] * distance [pc] -> km/s
def radec2lb(ra, dec):
    """
    Convert equatorial coordinates to galactic coordinates.

    Parameters: right ascension `ra` and declination `dec`, in radians.
    Returns (l, b) in radians, with l wrapped into [0, 2*pi).
    """
    sin_d = np.sin(dec)
    cos_d = np.cos(dec)
    sin_node = np.sin(ra - alom)
    cos_node = np.cos(ra - alom)
    # galactic latitude
    b = np.arcsin(sin_d * np.cos(incl) - cos_d * np.sin(incl) * sin_node)
    # galactic longitude, measured from the node and offset by lom
    sinl = (sin_d * np.sin(incl) + cos_d * np.cos(incl) * sin_node) / cos_d
    cosl = cos_d * cos_node / cos_d
    l = np.arctan2(sinl, cosl) + lom
    if l < 0:
        l = l + 2 * np.pi
    return l, b
def pmradec2lb(ra, dec, l, b, mua, mud):
    """
    Rotate equatorial proper motions into the galactic frame.

    Parameters: ra, dec, l, b in radians; mua (= pmra*) and mud in mas/yr.
    Returns the pair (pml, pmb) in mas/yr.
    """
    cos_b = np.cos(b)
    # rotation angle between the equatorial and galactic proper-motion axes
    siphi = np.cos(deo) * np.sin(ra - alpm) / cos_b
    cophi = (np.sin(deo) - np.sin(dec) * np.sin(b)) / (np.cos(dec) * cos_b)
    pml = mua * cophi + mud * siphi
    pmb = -mua * siphi + mud * cophi
    return pml, pmb
def Jacob(x):
    """
    Return the 6x6 Jacobian of the transformation from ICRS observables
    (ra, dec, plx, pmra*, pmdec, vrad) to (l, b, plx, U, V, W).

    The input parameter is an iterable of the form:
        x[0]: right ascension (equatorial) -> degrees
        x[1]: declination (equatorial) -> degrees
        x[2]: parallax -> mas
        x[3]: proper motion (mualpha*) -> mas/yr
        x[4]: proper motion (mudelta) -> mas/yr
        x[5]: radial velocity -> km/s

    The result is the chained product jac3 . jac2 . jac1 . jac0, where:
        jac0: unit conversion of the two angular coordinates
        jac1: (ra, dec) -> (l, b)
        jac2: (pmra*, pmdec) -> (pml, pmb)
        jac3: (l, b, plx, pml, pmb, vrad) -> (l, b, plx, U, V, W)
    """
    #inicialization
    jac0=np.zeros([6,6])
    jac1=np.zeros([6,6])
    jac2=np.zeros([6,6])
    jac3=np.zeros([6,6])
    #RA & DEC in degrees
    a=np.deg2rad(x[0])
    d=np.deg2rad(x[1])
    #Parallax in mas
    w=x[2]
    #proper motion in mas/yr
    mua=x[3]
    mud=x[4]
    #radial velocity along l.o.s. in km/s
    vrad=x[5]
    #galactic coordinates in degrees
    l,b=radec2lb(a,d)
    # precompute the trigonometric quantities reused in the entries below
    cd=np.cos(d);sd=np.sin(d);tand=np.tan(d)
    cb=np.cos(b); sb=np.sin(b);tanb=np.tan(b)
    cl=np.cos(l); sl=np.sin(l)
    cdeo=np.cos(deo); sdeo=np.sin(deo)
    cincl=np.cos(incl); sincl=np.sin(incl)
    calom=np.cos(a-alom); salom=np.sin(a-alom); talom=np.tan(a-alom)
    clt=np.cos(l-theta); slt=np.sin(l-theta)
    #units
    jac0[0,0]=np.pi/180/3600
    jac0[1,1]=np.pi/180/3600
    jac0[2,2]=1
    jac0[3,3]=1
    jac0[4,4]=1
    jac0[5,5]=1
    #from equatorial to galactic coordinates
    #l-alpha
    jac1[0,0]=((cincl/(calom)**2+sincl/calom*talom*tand)/(1+(cincl*talom+sincl/calom*tand)**2))
    #l-delta
    jac1[0,1]=(sincl/(calom*(cd)**2)/(1+(cincl*talom+sincl/calom*tand)**2))
    #b-alpha
    jac1[1,0]=(-((calom*cd*sincl)/np.sqrt(1-(cincl*sd-cd*salom*sincl)**2)))
    #b-delta
    jac1[1,1]=((cd*cincl+salom*sd*sincl)/np.sqrt(1-(cincl*sd-cd*salom*sincl)**2))
    #parallax-parallax
    jac1[2,2]=1
    #mua - mua
    jac1[3,3]=1
    #mud - mud
    jac1[4,4]=1
    #vrad - vrad
    jac1[5,5]=1
    """"""""""""""""""
    #from mua/mub to mul/mub
    #l-l
    jac2[0,0]=1
    #b-b
    jac2[1,1]=1
    #par-par
    jac2[2,2]=1
    #mul-l
    jac2[3,0]=-((mud*cdeo*clt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5))-(mua*cdeo*( \
    cb*cdeo*clt+sb*sdeo)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo))*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**( \
    1.5)+(mua*cdeo*sb*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)+(mud*cb*cdeo**2*(cb*cdeo*clt+sb*sdeo)*slt*slt)/( \
    1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)
    #ml-b
    jac2[3,1]=(mua/(cb)*(-(cdeo*clt*sb)+cb*sdeo)*(cb*cdeo*clt+sb*sdeo)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo)))/( \
    1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)+(mua/(cb)*(-(sb*(-(cdeo*clt*sb)+cb*sdeo))-cb*(cb*cdeo*clt+sb*sdeo)))/ \
    (1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)-(mud*cdeo*(-(cdeo*clt*sb)+cb*sdeo)*(cb*cdeo*clt+sb*sdeo)*slt)/( \
    1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)+(mua/(cb)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo))*tanb)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #ml-mua
    jac2[3,3]=(1/(cb)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo)))/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #ml-mud
    jac2[3,4]=-((cdeo*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5))
    #mb-l
    jac2[4,0]=(mua*cdeo*clt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)-(mud*cdeo*(cb*cdeo*clt+sb*sdeo)*(sdeo-sb*( \
    cb*cdeo*clt+sb*sdeo))*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)+(mud*cdeo*sb*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)-( \
    mua*cb*cdeo**2*(cb*cdeo*clt+sb*sdeo)*slt*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)
    #mb-b
    jac2[4,1]=(mud/(cb)*(-(cdeo*clt*sb)+cb*sdeo)*(cb*cdeo*clt+sb*sdeo)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo)))/ \
    (1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)+(mud/(cb)*(-(sb*(-(cdeo*clt*sb)+cb*sdeo))-cb*(cb*cdeo*clt+sb*sdeo)))/(1-( \
    cb*cdeo*clt+sb*sdeo)**2)**(0.5)+(mua*cdeo*(-(cdeo*clt*sb)+cb*sdeo)*(cb*cdeo*clt+sb*sdeo)*slt)/( \
    1-(cb*cdeo*clt+sb*sdeo)**2)**(1.5)+(mud/(cb)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo))*tanb)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #mb-mua
    jac2[4,3]=(cdeo*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #mb-mud
    jac2[4,4]=(1/(cb)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo)))/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #vrad-vrad
    jac2[5,5]=1
    "calculating ml and mb"
    ml,mb=pmradec2lb(a,d,l,b,mua,mud)
    #ml/mb -> uvw
    #l-l
    jac3[0,0]=1
    #b-b
    jac3[1,1]=1
    #par-par
    jac3[2,2]=1
    #U-l
    jac3[3,0]=-((const*ml*cl)/w)-vrad*cb*sl+(const*mb*sb*sl)/w
    #U-b
    jac3[3,1]=-((const*mb*cb*cl)/w)-vrad*cl*sb
    #U-par
    jac3[3,2]=(const*mb*cl*sb)/w**2+(const*ml*sl)/w**2
    #U-ml
    jac3[3,3]=-((const*sl)/w)
    #U-mb
    jac3[3,4]=-((const*cl*sb)/w)
    #U-vrad
    jac3[3,5]=cb*cl
    #V-l
    jac3[4,0]=vrad*cb*cl-(const*mb*cl*sb)/w-(const*ml*sl)/w
    #V-b
    jac3[4,1]=-((const*mb*cb*sl)/w)-vrad*sb*sl
    #V-par
    jac3[4,2]=-((const*ml*cl)/w**2)+(const*mb*sb*sl)/w**2
    #V-ml
    jac3[4,3]=(const*cl)/w
    #V-mb
    jac3[4,4]=-((const*sb*sl)/w)
    #V-vrad
    jac3[4,5]=cb*sl
    #W-l
    jac3[5,0]=0
    #W-b
    jac3[5,1]=vrad*cb-(const*mb*sb)/w
    #W-par
    jac3[5,2]=-((const*mb*cb)/w**2)
    #W-ml
    jac3[5,3]=0
    #W-mb
    jac3[5,4]=(const*cb)/w
    #W-vrad
    jac3[5,5]=sb
    # chain the four stages into the full 6x6 Jacobian
    return np.dot(jac3,np.dot(jac2,np.dot(jac1,jac0)))
def Jacob4(x):
    """
    Return the 4x4 Jacobian of the transformation from ICRS to l,b,plx,U,V,W,
    ignoring positional errors to increase speed (only the plx, pml, pmb,
    vrad rows/columns are propagated).

    The input parameter is an iterable of the form:
        x[0]: right ascention (equatorial) -> degrees
        x[1]: declination (equatorial) -> degrees
        x[2]: parallax -> mas
        x[3]: proper motion (mualpha*) -> mas/yr
        x[4]: proper motion (mudelta) -> mas/yr
        x[5]: radial velocity -> km/s
    """
    #inicialization
    jac0=np.zeros([4,4])
    #jac1=np.zeros([4,4])
    jac2=np.zeros([4,4])
    jac3=np.zeros([4,4])
    #RA & DEC in degrees
    a=np.deg2rad(x[0])
    d=np.deg2rad(x[1])
    #Parallax in mas
    w=x[2]
    #proper motion in mas/yr
    mua=x[3]
    mud=x[4]
    #radial velocity along l.o.s. in km/s
    vrad=x[5]
    #galactic coordinates in degrees
    l,b=radec2lb(a,d)
    # precompute the trigonometric quantities reused in the entries below
    cd=np.cos(d);sd=np.sin(d);tand=np.tan(d)
    cb=np.cos(b); sb=np.sin(b);tanb=np.tan(b)
    cl=np.cos(l); sl=np.sin(l)
    cdeo=np.cos(deo); sdeo=np.sin(deo)
    cincl=np.cos(incl); sincl=np.sin(incl)
    calom=np.cos(a-alom); salom=np.sin(a-alom); talom=np.tan(a-alom)
    clt=np.cos(l-theta); slt=np.sin(l-theta)
    #mas -> rad
    jac0[0,0]=1 #par
    jac0[1,1]=1 #pmra
    jac0[2,2]=1 #pmdec
    jac0[3,3]=1 #vr
    #from mua/mub to mul/mub
    #par-par
    jac2[0,0]=1
    #ml-mua
    jac2[1,1]=(1/(cb)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo)))/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #ml-mud
    jac2[1,2]=-((cdeo*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5))
    #mb-mua
    jac2[2,1]=(cdeo*slt)/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #mb-mud
    jac2[2,2]=(1/(cb)*(sdeo-sb*(cb*cdeo*clt+sb*sdeo)))/(1-(cb*cdeo*clt+sb*sdeo)**2)**(0.5)
    #vrad-vrad
    jac2[3,3]=1
    "calculating ml and mb"
    ml,mb=pmradec2lb(a,d,l,b,mua,mud)
    #ml/mb -> uvw
    #par-par
    jac3[0,0]=1
    #U-par
    jac3[1,0]=(const*mb*cl*sb)/w**2+(const*ml*sl)/w**2
    #U-ml
    jac3[1,1]=-((const*sl)/w)
    #U-mb
    jac3[1,2]=-((const*cl*sb)/w)
    #U-vrad
    jac3[1,3]=cb*cl
    #V-par
    jac3[2,0]=-((const*ml*cl)/w**2)+(const*mb*sb*sl)/w**2
    #V-ml
    jac3[2,1]=(const*cl)/w
    #V-mb
    jac3[2,2]=-((const*sb*sl)/w)
    #V-vrad
    jac3[2,3]=cb*sl
    #W-par
    jac3[3,0]=-((const*mb*cb)/w**2)
    #W-ml
    jac3[3,1]=0
    #W-mb
    jac3[3,2]=(const*cb)/w
    #W-vrad
    jac3[3,3]=sb
    # chain the stages into the reduced 4x4 Jacobian
    return np.dot(jac3,np.dot(jac2,jac0))
| [
"numpy.arctan2",
"numpy.deg2rad",
"numpy.zeros",
"numpy.arcsin",
"numpy.sin",
"numpy.tan",
"numpy.cos",
"numpy.dot",
"numpy.sqrt"
] | [((198, 221), 'numpy.deg2rad', 'np.deg2rad', (['(62.87124882)'], {}), '(62.87124882)\n', (208, 221), True, 'import numpy as np\n'), ((261, 284), 'numpy.deg2rad', 'np.deg2rad', (['(282.8594813)'], {}), '(282.8594813)\n', (271, 284), True, 'import numpy as np\n'), ((319, 342), 'numpy.deg2rad', 'np.deg2rad', (['(32.93680516)'], {}), '(32.93680516)\n', (329, 342), True, 'import numpy as np\n'), ((408, 431), 'numpy.deg2rad', 'np.deg2rad', (['(27.12825111)'], {}), '(27.12825111)\n', (418, 431), True, 'import numpy as np\n'), ((451, 474), 'numpy.deg2rad', 'np.deg2rad', (['(192.8594813)'], {}), '(192.8594813)\n', (461, 474), True, 'import numpy as np\n'), ((492, 516), 'numpy.deg2rad', 'np.deg2rad', (['(122.93680516)'], {}), '(122.93680516)\n', (502, 516), True, 'import numpy as np\n'), ((836, 851), 'numpy.arcsin', 'np.arcsin', (['sinb'], {}), '(sinb)\n', (845, 851), True, 'import numpy as np\n'), ((1948, 1964), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (1956, 1964), True, 'import numpy as np\n'), ((1973, 1989), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (1981, 1989), True, 'import numpy as np\n'), ((1998, 2014), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (2006, 2014), True, 'import numpy as np\n'), ((2023, 2039), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (2031, 2039), True, 'import numpy as np\n'), ((2070, 2086), 'numpy.deg2rad', 'np.deg2rad', (['x[0]'], {}), '(x[0])\n', (2080, 2086), True, 'import numpy as np\n'), ((2093, 2109), 'numpy.deg2rad', 'np.deg2rad', (['x[1]'], {}), '(x[1])\n', (2103, 2109), True, 'import numpy as np\n'), ((2320, 2329), 'numpy.cos', 'np.cos', (['d'], {}), '(d)\n', (2326, 2329), True, 'import numpy as np\n'), ((2333, 2342), 'numpy.sin', 'np.sin', (['d'], {}), '(d)\n', (2339, 2342), True, 'import numpy as np\n'), ((2348, 2357), 'numpy.tan', 'np.tan', (['d'], {}), '(d)\n', (2354, 2357), True, 'import numpy as np\n'), ((2365, 2374), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', 
(2371, 2374), True, 'import numpy as np\n'), ((2379, 2388), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (2385, 2388), True, 'import numpy as np\n'), ((2394, 2403), 'numpy.tan', 'np.tan', (['b'], {}), '(b)\n', (2400, 2403), True, 'import numpy as np\n'), ((2411, 2420), 'numpy.cos', 'np.cos', (['l'], {}), '(l)\n', (2417, 2420), True, 'import numpy as np\n'), ((2425, 2434), 'numpy.sin', 'np.sin', (['l'], {}), '(l)\n', (2431, 2434), True, 'import numpy as np\n'), ((2444, 2455), 'numpy.cos', 'np.cos', (['deo'], {}), '(deo)\n', (2450, 2455), True, 'import numpy as np\n'), ((2462, 2473), 'numpy.sin', 'np.sin', (['deo'], {}), '(deo)\n', (2468, 2473), True, 'import numpy as np\n'), ((2484, 2496), 'numpy.cos', 'np.cos', (['incl'], {}), '(incl)\n', (2490, 2496), True, 'import numpy as np\n'), ((2504, 2516), 'numpy.sin', 'np.sin', (['incl'], {}), '(incl)\n', (2510, 2516), True, 'import numpy as np\n'), ((2527, 2543), 'numpy.cos', 'np.cos', (['(a - alom)'], {}), '(a - alom)\n', (2533, 2543), True, 'import numpy as np\n'), ((2549, 2565), 'numpy.sin', 'np.sin', (['(a - alom)'], {}), '(a - alom)\n', (2555, 2565), True, 'import numpy as np\n'), ((2571, 2587), 'numpy.tan', 'np.tan', (['(a - alom)'], {}), '(a - alom)\n', (2577, 2587), True, 'import numpy as np\n'), ((2594, 2611), 'numpy.cos', 'np.cos', (['(l - theta)'], {}), '(l - theta)\n', (2600, 2611), True, 'import numpy as np\n'), ((2615, 2632), 'numpy.sin', 'np.sin', (['(l - theta)'], {}), '(l - theta)\n', (2621, 2632), True, 'import numpy as np\n'), ((7241, 7257), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (7249, 7257), True, 'import numpy as np\n'), ((7292, 7308), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (7300, 7308), True, 'import numpy as np\n'), ((7317, 7333), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (7325, 7333), True, 'import numpy as np\n'), ((7364, 7380), 'numpy.deg2rad', 'np.deg2rad', (['x[0]'], {}), '(x[0])\n', (7374, 7380), True, 'import numpy as np\n'), ((7387, 
7403), 'numpy.deg2rad', 'np.deg2rad', (['x[1]'], {}), '(x[1])\n', (7397, 7403), True, 'import numpy as np\n'), ((7614, 7623), 'numpy.cos', 'np.cos', (['d'], {}), '(d)\n', (7620, 7623), True, 'import numpy as np\n'), ((7627, 7636), 'numpy.sin', 'np.sin', (['d'], {}), '(d)\n', (7633, 7636), True, 'import numpy as np\n'), ((7642, 7651), 'numpy.tan', 'np.tan', (['d'], {}), '(d)\n', (7648, 7651), True, 'import numpy as np\n'), ((7659, 7668), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (7665, 7668), True, 'import numpy as np\n'), ((7673, 7682), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (7679, 7682), True, 'import numpy as np\n'), ((7688, 7697), 'numpy.tan', 'np.tan', (['b'], {}), '(b)\n', (7694, 7697), True, 'import numpy as np\n'), ((7705, 7714), 'numpy.cos', 'np.cos', (['l'], {}), '(l)\n', (7711, 7714), True, 'import numpy as np\n'), ((7719, 7728), 'numpy.sin', 'np.sin', (['l'], {}), '(l)\n', (7725, 7728), True, 'import numpy as np\n'), ((7738, 7749), 'numpy.cos', 'np.cos', (['deo'], {}), '(deo)\n', (7744, 7749), True, 'import numpy as np\n'), ((7756, 7767), 'numpy.sin', 'np.sin', (['deo'], {}), '(deo)\n', (7762, 7767), True, 'import numpy as np\n'), ((7778, 7790), 'numpy.cos', 'np.cos', (['incl'], {}), '(incl)\n', (7784, 7790), True, 'import numpy as np\n'), ((7798, 7810), 'numpy.sin', 'np.sin', (['incl'], {}), '(incl)\n', (7804, 7810), True, 'import numpy as np\n'), ((7821, 7837), 'numpy.cos', 'np.cos', (['(a - alom)'], {}), '(a - alom)\n', (7827, 7837), True, 'import numpy as np\n'), ((7843, 7859), 'numpy.sin', 'np.sin', (['(a - alom)'], {}), '(a - alom)\n', (7849, 7859), True, 'import numpy as np\n'), ((7865, 7881), 'numpy.tan', 'np.tan', (['(a - alom)'], {}), '(a - alom)\n', (7871, 7881), True, 'import numpy as np\n'), ((7888, 7905), 'numpy.cos', 'np.cos', (['(l - theta)'], {}), '(l - theta)\n', (7894, 7905), True, 'import numpy as np\n'), ((7909, 7926), 'numpy.sin', 'np.sin', (['(l - theta)'], {}), '(l - theta)\n', (7915, 7926), True, 'import numpy as np\n'), 
((944, 955), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (950, 955), True, 'import numpy as np\n'), ((993, 1004), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (999, 1004), True, 'import numpy as np\n'), ((1011, 1033), 'numpy.arctan2', 'np.arctan2', (['sinl', 'cosl'], {}), '(sinl, cosl)\n', (1021, 1033), True, 'import numpy as np\n'), ((1349, 1358), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (1355, 1358), True, 'import numpy as np\n'), ((3170, 3221), 'numpy.sqrt', 'np.sqrt', (['(1 - (cincl * sd - cd * salom * sincl) ** 2)'], {}), '(1 - (cincl * sd - cd * salom * sincl) ** 2)\n', (3177, 3221), True, 'import numpy as np\n'), ((9251, 9269), 'numpy.dot', 'np.dot', (['jac2', 'jac0'], {}), '(jac2, jac0)\n', (9257, 9269), True, 'import numpy as np\n'), ((764, 775), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (770, 775), True, 'import numpy as np\n'), ((776, 788), 'numpy.cos', 'np.cos', (['incl'], {}), '(incl)\n', (782, 788), True, 'import numpy as np\n'), ((814, 831), 'numpy.sin', 'np.sin', (['(ra - alom)'], {}), '(ra - alom)\n', (820, 831), True, 'import numpy as np\n'), ((965, 976), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (971, 976), True, 'import numpy as np\n'), ((977, 994), 'numpy.cos', 'np.cos', (['(ra - alom)'], {}), '(ra - alom)\n', (983, 994), True, 'import numpy as np\n'), ((1321, 1332), 'numpy.cos', 'np.cos', (['deo'], {}), '(deo)\n', (1327, 1332), True, 'import numpy as np\n'), ((1333, 1350), 'numpy.sin', 'np.sin', (['(ra - alpm)'], {}), '(ra - alpm)\n', (1339, 1350), True, 'import numpy as np\n'), ((1370, 1381), 'numpy.sin', 'np.sin', (['deo'], {}), '(deo)\n', (1376, 1381), True, 'import numpy as np\n'), ((1406, 1417), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (1412, 1417), True, 'import numpy as np\n'), ((1418, 1427), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (1424, 1427), True, 'import numpy as np\n'), ((3069, 3120), 'numpy.sqrt', 'np.sqrt', (['(1 - (cincl * sd - cd * salom * sincl) ** 2)'], {}), '(1 - (cincl * sd - cd * 
salom * sincl) ** 2)\n', (3076, 3120), True, 'import numpy as np\n'), ((6701, 6719), 'numpy.dot', 'np.dot', (['jac1', 'jac0'], {}), '(jac1, jac0)\n', (6707, 6719), True, 'import numpy as np\n'), ((789, 800), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (795, 800), True, 'import numpy as np\n'), ((801, 813), 'numpy.sin', 'np.sin', (['incl'], {}), '(incl)\n', (807, 813), True, 'import numpy as np\n'), ((877, 888), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (883, 888), True, 'import numpy as np\n'), ((889, 901), 'numpy.sin', 'np.sin', (['incl'], {}), '(incl)\n', (895, 901), True, 'import numpy as np\n'), ((927, 944), 'numpy.sin', 'np.sin', (['(ra - alom)'], {}), '(ra - alom)\n', (933, 944), True, 'import numpy as np\n'), ((1382, 1393), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (1388, 1393), True, 'import numpy as np\n'), ((1394, 1403), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (1400, 1403), True, 'import numpy as np\n'), ((902, 913), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (908, 913), True, 'import numpy as np\n'), ((914, 926), 'numpy.cos', 'np.cos', (['incl'], {}), '(incl)\n', (920, 926), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from .. import getisord
from libpysal.weights.distance import DistanceBand
from libpysal.common import pandas
# Six sample point coordinates shared by every test case below
POINTS = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
# Binary spatial weights: points within distance 15 are neighbors
W = DistanceBand(POINTS, threshold=15)
# Attribute value observed at each point
Y = np.array([2, 3, 3.2, 5, 8, 7])
# True when pandas could not be imported; used to skip the by_col tests
PANDAS_EXTINCT = pandas is None
class G_Tester(unittest.TestCase):
    """Regression tests for the global Getis-Ord G statistic."""

    def setUp(self):
        # Attach the shared fixtures and fix the RNG seed for reproducibility.
        self.w = W
        self.y = Y
        np.random.seed(10)

    def test_G(self):
        stat = getisord.G(self.y, self.w)
        self.assertAlmostEqual(stat.G, 0.55709779, places=8)
        self.assertAlmostEqual(stat.p_norm, 0.1729, places=4)

    @unittest.skipIf(PANDAS_EXTINCT, "missing pandas")
    def test_by_col(self):
        import pandas as pd

        frame = pd.DataFrame(self.y, columns=["y"])
        # Seed identically before both code paths so their draws match.
        np.random.seed(12345)
        out = getisord.G.by_col(frame, ["y"], w=self.w)
        col_stat = np.unique(out.y_g.values)
        col_pval = np.unique(out.y_p_sim.values)
        np.random.seed(12345)
        direct = getisord.G(self.y, self.w)
        self.assertAlmostEqual(col_stat, direct._statistic)
        self.assertAlmostEqual(col_pval, direct.p_sim)
class G_Local_Tester(unittest.TestCase):
    """Regression tests for the local Getis-Ord G statistic."""

    def setUp(self):
        # Attach the shared fixtures and fix the RNG seed for reproducibility.
        self.w = W
        self.y = Y
        np.random.seed(10)

    def test_G_Local_Binary(self):
        result = getisord.G_Local(self.y, self.w, transform="B")
        self.assertAlmostEqual(result.Zs[0], -1.0136729, places=7)
        self.assertAlmostEqual(result.p_sim[0], 0.10100000000000001, places=7)
        self.assertAlmostEqual(result.p_z_sim[0], 0.154373052, places=7)

    def test_G_Local_Row_Standardized(self):
        result = getisord.G_Local(self.y, self.w, transform="R")
        self.assertAlmostEqual(result.Zs[0], -0.62074534, places=7)
        self.assertAlmostEqual(result.p_sim[0], 0.10100000000000001, places=7)

    def test_G_star_Local_Binary(self):
        result = getisord.G_Local(self.y, self.w, transform="B", star=True)
        self.assertAlmostEqual(result.Zs[0], -1.39727626, places=8)
        self.assertAlmostEqual(result.p_sim[0], 0.10100000000000001, places=7)

    def test_G_star_Row_Standardized(self):
        result = getisord.G_Local(self.y, self.w, transform="R", star=True)
        self.assertAlmostEqual(result.Zs[0], -0.62488094, places=8)
        self.assertAlmostEqual(result.p_sim[0], 0.10100000000000001, places=7)

    @unittest.skipIf(PANDAS_EXTINCT, "missing pandas")
    def test_by_col(self):
        import pandas as pd

        frame = pd.DataFrame(self.y, columns=["y"])
        # Seed identically before both code paths so their draws match.
        np.random.seed(12345)
        out = getisord.G_Local.by_col(frame, ["y"], w=self.w)
        np.random.seed(12345)
        direct = getisord.G_Local(self.y, self.w)
        np.testing.assert_allclose(out.y_g_local.values, direct.Gs)
        np.testing.assert_allclose(out.y_p_sim, direct.p_sim)
# Collect both test cases into one suite so the module can be run directly
# as a script (test runners also discover the classes on their own).
suite = unittest.TestSuite()
test_classes = [G_Tester, G_Local_Tester]
for i in test_classes:
    a = unittest.TestLoader().loadTestsFromTestCase(i)
    suite.addTest(a)
if __name__ == "__main__":
    # Run the assembled suite with the default text runner.
    runner = unittest.TextTestRunner()
    runner.run(suite)
| [
"pandas.DataFrame",
"unittest.skipIf",
"numpy.random.seed",
"unittest.TextTestRunner",
"unittest.TestSuite",
"numpy.array",
"unittest.TestLoader",
"libpysal.weights.distance.DistanceBand",
"numpy.testing.assert_allclose",
"numpy.unique"
] | [((221, 255), 'libpysal.weights.distance.DistanceBand', 'DistanceBand', (['POINTS'], {'threshold': '(15)'}), '(POINTS, threshold=15)\n', (233, 255), False, 'from libpysal.weights.distance import DistanceBand\n'), ((260, 290), 'numpy.array', 'np.array', (['[2, 3, 3.2, 5, 8, 7]'], {}), '([2, 3, 3.2, 5, 8, 7])\n', (268, 290), True, 'import numpy as np\n'), ((2804, 2824), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (2822, 2824), False, 'import unittest\n'), ((632, 681), 'unittest.skipIf', 'unittest.skipIf', (['PANDAS_EXTINCT', '"""missing pandas"""'], {}), "(PANDAS_EXTINCT, 'missing pandas')\n", (647, 681), False, 'import unittest\n'), ((2349, 2398), 'unittest.skipIf', 'unittest.skipIf', (['PANDAS_EXTINCT', '"""missing pandas"""'], {}), "(PANDAS_EXTINCT, 'missing pandas')\n", (2364, 2398), False, 'import unittest\n'), ((3007, 3032), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (3030, 3032), False, 'import unittest\n'), ((428, 446), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (442, 446), True, 'import numpy as np\n'), ((751, 786), 'pandas.DataFrame', 'pd.DataFrame', (['self.y'], {'columns': "['y']"}), "(self.y, columns=['y'])\n", (763, 786), True, 'import pandas as pd\n'), ((795, 816), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (809, 816), True, 'import numpy as np\n'), ((893, 917), 'numpy.unique', 'np.unique', (['r1.y_g.values'], {}), '(r1.y_g.values)\n', (902, 917), True, 'import numpy as np\n'), ((938, 966), 'numpy.unique', 'np.unique', (['r1.y_p_sim.values'], {}), '(r1.y_p_sim.values)\n', (947, 966), True, 'import numpy as np\n'), ((975, 996), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (989, 996), True, 'import numpy as np\n'), ((1266, 1284), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (1280, 1284), True, 'import numpy as np\n'), ((2468, 2503), 'pandas.DataFrame', 'pd.DataFrame', (['self.y'], {'columns': "['y']"}), 
"(self.y, columns=['y'])\n", (2480, 2503), True, 'import pandas as pd\n'), ((2512, 2533), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (2526, 2533), True, 'import numpy as np\n'), ((2600, 2621), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (2614, 2621), True, 'import numpy as np\n'), ((2678, 2734), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r1.y_g_local.values', 'stat.Gs'], {}), '(r1.y_g_local.values, stat.Gs)\n', (2704, 2734), True, 'import numpy as np\n'), ((2743, 2793), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r1.y_p_sim', 'stat.p_sim'], {}), '(r1.y_p_sim, stat.p_sim)\n', (2769, 2793), True, 'import numpy as np\n'), ((2898, 2919), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2917, 2919), False, 'import unittest\n')] |
"""
Inspect a model with specific parameters defined in config_sandbox.py
"""
from __future__ import print_function
import visualutil
import setuputil
import yaml
import numpy
import lensutil
import os
from astropy.io import fits
from subprocess import call
import sample_vis
import uvutil
def plot(cleanup=True, configloc='sandbox.yaml', interactive=True, threshold=1.2, plot=True, tag='sandbox'):
    '''
    Load the model described in a YAML configuration file, evaluate its
    log-probability against the visibility data, and (optionally) plot
    the resulting fit.

    Parameters
    ----------
    cleanup: bool
        forwarded to visualutil.plotFit; presumably removes scratch files
        after plotting -- confirm in visualutil
    configloc: str
        path to the YAML configuration file
    interactive: bool
        forwarded to visualutil.plotFit
    threshold: float
        in mJy, cleaning threshold
    plot: bool
        if False, skip plotting and only return the log-probability
    tag: str
        label for the outputs produced by visualutil.plotFit

    Returns
    -------
    testlnprob: float
        ln-probability of the model defined by paramSetup['p_l']
    '''
    # read the input parameters
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; consider yaml.safe_load(configfile).
    # The file handle is also never closed -- consider a `with` block.
    configfile = open(configloc, 'r')
    config = yaml.load(configfile)
    paramSetup = setuputil.loadParams(config)
    fixindx = setuputil.fixParams(paramSetup)
    testfit = paramSetup['p_l']
    visfile = config['UVData']
    # load (u, v, w) coordinates, phase-center data, and visibilities
    uuu, vvv, www = uvutil.uvload(visfile)
    pcd = uvutil.pcdload(visfile)
    vis_complex, wgt = uvutil.visload(visfile)
    # remove the data points with zero or negative weight
    positive_definite = wgt > 0
    vis_complex = vis_complex[positive_definite]
    wgt = wgt[positive_definite]
    uuu = uuu[positive_definite]
    vvv = vvv[positive_definite]
    testlnprob, testmu = lnprob(testfit, vis_complex, wgt, uuu, vvv, pcd,
            fixindx, paramSetup, computeamp=True)
    # prepend 1 dummy value to represent lnprob
    testfit = numpy.append(testlnprob, testfit)
    # append nmu dummy values that represent the magnification factors
    nlensedsource = paramSetup['nlensedsource']
    nlensedregions = paramSetup['nlensedregions']
    nmu = 2 * (numpy.array(nlensedsource).sum() + nlensedregions)
    for i in range(nmu):
        testfit = numpy.append(testfit, 0)
    print("lnprob: %f" %testlnprob)
    print("Using the following model parameters:")
    for k, v in zip(paramSetup['pname'], testfit[1:-4]):
        print("%s : %.4f" %(k,v))
    if plot:
        visualutil.plotFit(config, testfit, threshold,
                           tag=tag, cleanup=cleanup,
                           interactive=interactive)
    return testlnprob
def lnprior(pzero_regions, paramSetup):
    """
    Compute the natural log of the prior probability of the model parameters.

    Parameters
    ----------
    pzero_regions: numpy.ndarray
        the current parameter vector
    paramSetup: dict
        must provide 'PriorShape' (array of 'Uniform'/'Gaussian'/
        'GaussianPos' strings) and the per-parameter bounds/moments
        'p_l' and 'p_u' (lower/upper for Uniform; mean/sigma for Gaussian*)

    Returns
    -------
    (priorln, mu): (float, int)
        ln prior probability (-inf when any parameter is invalid) and a
        placeholder magnification factor of 1.
    """
    priorln = 0.0
    mu = 1

    # Reject any non-finite parameter vector (NaN or inf makes x*0 != 0).
    if (pzero_regions * 0 != 0).any():
        return -numpy.inf, mu

    # Uniform priors: zero inside the bounds, -inf outside.
    uniform_regions = paramSetup['PriorShape'] == 'Uniform'
    if uniform_regions.any():
        p_l_regions = paramSetup['p_l'][uniform_regions]
        p_u_regions = paramSetup['p_u'][uniform_regions]
        pzero_uniform = pzero_regions[uniform_regions]
        inside = ((pzero_uniform > p_l_regions).all()
                  and (pzero_uniform < p_u_regions).all())
        if not inside:
            return -numpy.inf, mu

    # Gaussian priors; p_l holds the mean and p_u the sigma.
    gaussian_regions = paramSetup['PriorShape'] == 'Gaussian'
    if gaussian_regions.any():
        import scipy.stats as stats
        mean_regions = paramSetup['p_l'][gaussian_regions]
        rms_regions = paramSetup['p_u'][gaussian_regions]
        pzero_gauss = pzero_regions[gaussian_regions]
        priorln += numpy.log(stats.norm(scale=rms_regions, loc=mean_regions).pdf(pzero_gauss)).sum()

    # Gaussian prior restricted to positive values (e.g. flux density).
    gaussPos_regions = paramSetup['PriorShape'] == 'GaussianPos'
    if gaussPos_regions.any():
        pzero_gaussPos = pzero_regions[gaussPos_regions]
        # .any() makes the sign check valid for multi-element arrays too.
        if (pzero_gaussPos < 0.0).any():
            return -numpy.inf, mu
        import scipy.stats as stats
        mean_regions = paramSetup['p_l'][gaussPos_regions]
        rms_regions = paramSetup['p_u'][gaussPos_regions]
        # BUG FIX: this previously evaluated pdf(pzero_gauss), a name only
        # bound in the 'Gaussian' branch above, raising NameError whenever a
        # GaussianPos prior appeared without a Gaussian one.
        priorln += numpy.log(stats.norm(scale=rms_regions, loc=mean_regions).pdf(pzero_gaussPos)).sum()

    return priorln, mu
def lnlike(pzero_regions, vis_complex, wgt, uuu, vvv, pcd,
           fixindx, paramSetup, computeamp=True, miriad=False):
    """ Function that computes the Ln likelihood of the data

    Args:
        pzero_regions: proposed model parameter values for all regions.
        vis_complex: observed complex visibilities.
        wgt: visibility weights.
        uuu, vvv: u and v coordinates of the visibilities.
        pcd: phase center data (passed through to sample_vis.uvmodel).
        fixindx: index array; entries >= 0 mark parameters that are fixed
            relative to another parameter.
        paramSetup: dict describing the model (regions, lenses, sources).
        computeamp (bool): if True, evaluate magnification factors per region.
        miriad (bool): if True, compute model visibilities with external
            MIRIAD tasks instead of the python sampler.

    Returns:
        (likeln, amp): the ln likelihood (-0.5 * chi^2) and the list of
        magnification 'blobs' accumulated per region.
    """
    # search poff_models for parameters fixed relative to other parameters
    fixed = (numpy.where(fixindx >= 0))[0]
    nfixed = fixindx[fixed].size
    p_u_regions = paramSetup['p_u']
    # poff_regions holds the offsets applied to tied parameters; it starts
    # at zero for every parameter (copy keeps the same shape/dtype as p_u)
    poff_regions = p_u_regions.copy()
    poff_regions[:] = 0.
    #for ifix in range(nfixed):
    #    poff_regions[fixed[ifix]] = pzero_regions[fixindx[fixed[ifix]]]
    for ifix in range(nfixed):
        ifixed = fixed[ifix]
        subindx = int(fixindx[ifixed])
        par0 = 0
        # follow one level of chained fixing: if the anchor parameter is
        # itself fixed to another one, add that parameter's value too
        if fixindx[subindx] > 0:
            par0 = pzero_regions[fixindx[subindx]]
        poff_regions[ifixed] = pzero_regions[subindx] + par0

    parameters_regions = pzero_regions + poff_regions

    npar_previous = 0

    amp = []  # Will contain the 'blobs' we compute
    g_image_all = 0.
    g_lensimage_all = 0.
    e_image_all = 0.
    e_lensimage_all = 0.

    nregions = paramSetup['nregions']
    for regioni in range(nregions):

        # get the model info for this model
        x = paramSetup['x'][regioni]
        y = paramSetup['y'][regioni]
        headmod = paramSetup['modelheader'][regioni]
        nlens = paramSetup['nlens_regions'][regioni]
        nsource = paramSetup['nsource_regions'][regioni]
        model_types = paramSetup['model_types'][regioni]

        # get pzero, p_u, and p_l for this specific model
        # (5 parameters per lens, 6 per source)
        nparlens = 5 * nlens
        nparsource = 6 * nsource
        npar = nparlens + nparsource + npar_previous
        parameters = parameters_regions[npar_previous:npar]
        npar_previous = npar

        #-----------------------------------------------------------------
        # Create a surface brightness map of lensed emission for the given set
        # of foreground lens(es) and background source parameters.
        #-----------------------------------------------------------------
        g_image, g_lensimage, e_image, e_lensimage, amp_tot, amp_mask = \
            lensutil.sbmap(x, y, nlens, nsource, parameters, model_types,
                           computeamp=computeamp)
        e_image_all += e_image
        e_lensimage_all += e_lensimage
        g_image_all += g_image
        g_lensimage_all += g_lensimage
        amp.extend(amp_tot)
        amp.extend(amp_mask)

        # --------------------------------------------------------------------
        # Python version of UVMODEL:
        # "Observe" the lensed emission with the interferometer
        # --------------------------------------------------------------------
        if nlens > 0:
            if computeamp:
                # Evaluate amplification for each region: ratio of lensed
                # to unlensed flux, both within the mask and in total
                lensmask = e_lensimage != 0
                mask = e_image != 0
                numer = g_lensimage[lensmask].sum()
                denom = g_image[mask].sum()
                amp_mask = numer / denom
                numer = g_lensimage.sum()
                denom = g_image.sum()
                amp_tot = numer / denom
                # cap magnifications at 100 to avoid runaway values
                if amp_tot > 1e2:
                    amp_tot = 1e2
                if amp_mask > 1e2:
                    amp_mask = 1e2
                amp.extend([amp_tot])
                amp.extend([amp_mask])
            else:
                amp.extend([1.0])
                amp.extend([1.0])

    if miriad:
        # save the fits image of the lensed source
        ptag = str(os.getpid())
        SBmapLoc = 'LensedSBmap' + ptag + '.fits'
        fits.writeto(SBmapLoc, g_lensimage_all, header=headmod, clobber=True)

        # convert fits format to miriad format
        SBmapMiriad = 'LensedSBmap' + ptag + '.miriad'
        os.system('rm -rf ' + SBmapMiriad)
        cmd = 'fits op=xyin in=' + SBmapLoc + ' out=' + SBmapMiriad
        call(cmd + ' > /dev/null 2>&1', shell=True)

        # compute simulated visibilities
        modelvisfile = 'SimulatedVisibilities' + ptag + '.miriad'
        call('rm -rf ' + modelvisfile, shell=True)
        # NOTE(review): visfilemiriad is not defined in this function --
        # presumably a module-level global; verify before running with
        # miriad=True
        cmd = 'uvmodel options=subtract vis=' + visfilemiriad + \
            ' model=' + SBmapMiriad + ' out=' + modelvisfile
        call(cmd + ' > /dev/null 2>&1', shell=True)

        # convert simulated visibilities to uvfits format
        mvuvfits = 'SimulatedVisibilities' + ptag + '.uvfits'
        call('rm -rf ' + mvuvfits, shell=True)
        cmd = 'fits op=uvout in=' + modelvisfile + ' out=' + mvuvfits
        call(cmd + ' > /dev/null 2>&1', shell=True)

        # read simulated visibilities; MIRIAD's uvmodel already subtracted
        # the model, so the DATA columns hold the residuals directly
        mvuv = fits.open(mvuvfits)
        diff_real = mvuv[0].data['DATA'][:, 0, 0, 0, 0, 0]
        diff_imag = mvuv[0].data['DATA'][:, 0, 0, 0, 0, 1]
        wgt = mvuv[0].data['DATA'][:, 0, 0, 0, 0, 2]
        #model_complex = model_real[goodvis] + 1.0j * model_imag[goodvis]
        diff_all = numpy.append(diff_real, diff_imag)
        wgt = numpy.append(wgt, wgt)
        goodvis = wgt > 0
        diff_all = diff_all[goodvis]
        wgt = wgt[goodvis]
        chi2_all = wgt * diff_all * diff_all
    else:
        model_complex = sample_vis.uvmodel(g_lensimage_all, headmod,
                                           uuu, vvv, pcd)
        # print(vis_complex.shape, model_complex.shape)  # remove
        diff_all = numpy.abs(vis_complex - model_complex)
        chi2_all = wgt * diff_all * diff_all

    #model_real += numpy.real(model_complex)
    #model_imag += numpy.imag(model_complex)
    #fits.writeto('g_lensimage.fits', g_lensimage_all, headmod, clobber=True)
    #import matplotlib.pyplot as plt
    #print(pzero_regions)
    #plt.imshow(g_lensimage, origin='lower')
    #plt.colorbar()
    #plt.show()
    #plt.imshow(g_image, origin='lower')
    #plt.colorbar()
    #plt.show()

    # calculate chi^2 assuming natural weighting
    #fnuisance = 0.0
    #modvariance_real = 1 / wgt #+ fnuisance ** 2 * model_real ** 2
    #modvariance_imag = 1 / wgt #+ fnuisance ** 2 * model_imag ** 2
    #wgt = wgt / 4.
    #chi2_real_all = (real - model_real) ** 2. / modvariance_real
    #chi2_imag_all = (imag - model_imag) ** 2. / modvariance_imag
    #chi2_all = numpy.append(chi2_real_all, chi2_imag_all)

    # compute the sigma term
    #sigmaterm_real = numpy.log(2 * numpy.pi / wgt)
    #sigmaterm_imag = numpy.log(2 * numpy.pi * modvariance_imag)

    # compute the ln likelihood
    lnlikemethod = paramSetup['lnlikemethod']
    if lnlikemethod == 'chi2':
        lnlike = chi2_all
    else:
        # by definition, loglike = -n/2*ln(2pi sigma^2) - 1/(2sigma^2) sum of (data-model)^2 over i=1 to n; but the constant term doesn't matter
        # NOTE(review): sigmaterm_all is computed but deliberately left out
        # of lnlike below (see the commented '+ sigmaterm_all')
        sigmaterm_all = len(wgt) * numpy.log(2 * numpy.pi / wgt)
        lnlike = chi2_all  # + sigmaterm_all
        # * -1/2 factor in latter step

    # compute number of degrees of freedom
    #nmeasure = lnlike.size
    #nparam = (pzero != 0).size
    #ndof = nmeasure - nparam

    # assert that lnlike is equal to -1 * maximum likelihood estimate
    # use visibilities where weight is greater than 0
    #goodvis = wgt > 0
    #likeln = -0.5 * lnlike[goodvis].sum()
    likeln = -0.5 * lnlike.sum()
    #print(pcd, likeln)
    # guard against NaN: NaN * 0 != 0, anything finite * 0 == 0
    if likeln * 0 != 0:
        likeln = -numpy.inf

    return likeln, amp
def lnprob(pzero_regions, vis_complex, wgt, uuu, vvv, pcd,
           fixindx, paramSetup, computeamp=True):
    """
    Compute the ln posterior probability: ln prior + ln likelihood.

    Returns:
        (probln, mu): the ln probability and the magnification blobs
        (mu is 1 when the prior rules the sample out).
    """
    prior_ln, mu = lnprior(pzero_regions, paramSetup)
    # Skip the expensive likelihood evaluation when the prior already
    # rejects this sample.
    if not numpy.isfinite(prior_ln):
        return -numpy.inf, 1
    like_ln, mu = lnlike(pzero_regions, vis_complex, wgt, uuu, vvv, pcd,
                         fixindx, paramSetup, computeamp=computeamp)
    normalization = 1.0  # 2 * real.size
    return prior_ln * normalization + like_ln, mu
| [
"yaml.load",
"numpy.abs",
"sample_vis.uvmodel",
"scipy.stats.norm",
"numpy.isfinite",
"numpy.append",
"lensutil.sbmap",
"setuputil.loadParams",
"uvutil.uvload",
"uvutil.pcdload",
"os.system",
"subprocess.call",
"astropy.io.fits.open",
"os.getpid",
"numpy.log",
"astropy.io.fits.writeto"... | [((594, 615), 'yaml.load', 'yaml.load', (['configfile'], {}), '(configfile)\n', (603, 615), False, 'import yaml\n'), ((634, 662), 'setuputil.loadParams', 'setuputil.loadParams', (['config'], {}), '(config)\n', (654, 662), False, 'import setuputil\n'), ((677, 708), 'setuputil.fixParams', 'setuputil.fixParams', (['paramSetup'], {}), '(paramSetup)\n', (696, 708), False, 'import setuputil\n'), ((792, 814), 'uvutil.uvload', 'uvutil.uvload', (['visfile'], {}), '(visfile)\n', (805, 814), False, 'import uvutil\n'), ((825, 848), 'uvutil.pcdload', 'uvutil.pcdload', (['visfile'], {}), '(visfile)\n', (839, 848), False, 'import uvutil\n'), ((872, 895), 'uvutil.visload', 'uvutil.visload', (['visfile'], {}), '(visfile)\n', (886, 895), False, 'import uvutil\n'), ((1321, 1354), 'numpy.append', 'numpy.append', (['testlnprob', 'testfit'], {}), '(testlnprob, testfit)\n', (1333, 1354), False, 'import numpy\n'), ((1634, 1658), 'numpy.append', 'numpy.append', (['testfit', '(0)'], {}), '(testfit, 0)\n', (1646, 1658), False, 'import numpy\n'), ((1859, 1960), 'visualutil.plotFit', 'visualutil.plotFit', (['config', 'testfit', 'threshold'], {'tag': 'tag', 'cleanup': 'cleanup', 'interactive': 'interactive'}), '(config, testfit, threshold, tag=tag, cleanup=cleanup,\n interactive=interactive)\n', (1877, 1960), False, 'import visualutil\n'), ((4430, 4455), 'numpy.where', 'numpy.where', (['(fixindx >= 0)'], {}), '(fixindx >= 0)\n', (4441, 4455), False, 'import numpy\n'), ((6246, 6335), 'lensutil.sbmap', 'lensutil.sbmap', (['x', 'y', 'nlens', 'nsource', 'parameters', 'model_types'], {'computeamp': 'computeamp'}), '(x, y, nlens, nsource, parameters, model_types, computeamp=\n computeamp)\n', (6260, 6335), False, 'import lensutil\n'), ((7706, 7775), 'astropy.io.fits.writeto', 'fits.writeto', (['SBmapLoc', 'g_lensimage_all'], {'header': 'headmod', 'clobber': '(True)'}), '(SBmapLoc, g_lensimage_all, header=headmod, clobber=True)\n', (7718, 7775), False, 'from astropy.io 
import fits\n'), ((7887, 7921), 'os.system', 'os.system', (["('rm -rf ' + SBmapMiriad)"], {}), "('rm -rf ' + SBmapMiriad)\n", (7896, 7921), False, 'import os\n'), ((7998, 8041), 'subprocess.call', 'call', (["(cmd + ' > /dev/null 2>&1')"], {'shell': '(True)'}), "(cmd + ' > /dev/null 2>&1', shell=True)\n", (8002, 8041), False, 'from subprocess import call\n'), ((8158, 8200), 'subprocess.call', 'call', (["('rm -rf ' + modelvisfile)"], {'shell': '(True)'}), "('rm -rf ' + modelvisfile, shell=True)\n", (8162, 8200), False, 'from subprocess import call\n'), ((8340, 8383), 'subprocess.call', 'call', (["(cmd + ' > /dev/null 2>&1')"], {'shell': '(True)'}), "(cmd + ' > /dev/null 2>&1', shell=True)\n", (8344, 8383), False, 'from subprocess import call\n'), ((8513, 8551), 'subprocess.call', 'call', (["('rm -rf ' + mvuvfits)"], {'shell': '(True)'}), "('rm -rf ' + mvuvfits, shell=True)\n", (8517, 8551), False, 'from subprocess import call\n'), ((8630, 8673), 'subprocess.call', 'call', (["(cmd + ' > /dev/null 2>&1')"], {'shell': '(True)'}), "(cmd + ' > /dev/null 2>&1', shell=True)\n", (8634, 8673), False, 'from subprocess import call\n'), ((8728, 8747), 'astropy.io.fits.open', 'fits.open', (['mvuvfits'], {}), '(mvuvfits)\n', (8737, 8747), False, 'from astropy.io import fits\n'), ((9012, 9046), 'numpy.append', 'numpy.append', (['diff_real', 'diff_imag'], {}), '(diff_real, diff_imag)\n', (9024, 9046), False, 'import numpy\n'), ((9061, 9083), 'numpy.append', 'numpy.append', (['wgt', 'wgt'], {}), '(wgt, wgt)\n', (9073, 9083), False, 'import numpy\n'), ((9253, 9312), 'sample_vis.uvmodel', 'sample_vis.uvmodel', (['g_lensimage_all', 'headmod', 'uuu', 'vvv', 'pcd'], {}), '(g_lensimage_all, headmod, uuu, vvv, pcd)\n', (9271, 9312), False, 'import sample_vis\n'), ((9416, 9454), 'numpy.abs', 'numpy.abs', (['(vis_complex - model_complex)'], {}), '(vis_complex - model_complex)\n', (9425, 9454), False, 'import numpy\n'), ((11604, 11622), 'numpy.isfinite', 'numpy.isfinite', (['lp'], {}), 
'(lp)\n', (11618, 11622), False, 'import numpy\n'), ((7635, 7646), 'os.getpid', 'os.getpid', ([], {}), '()\n', (7644, 7646), False, 'import os\n'), ((10781, 10810), 'numpy.log', 'numpy.log', (['(2 * numpy.pi / wgt)'], {}), '(2 * numpy.pi / wgt)\n', (10790, 10810), False, 'import numpy\n'), ((1539, 1565), 'numpy.array', 'numpy.array', (['nlensedsource'], {}), '(nlensedsource)\n', (1550, 1565), False, 'import numpy\n'), ((3318, 3365), 'scipy.stats.norm', 'stats.norm', ([], {'scale': 'rms_regions', 'loc': 'mean_regions'}), '(scale=rms_regions, loc=mean_regions)\n', (3328, 3365), True, 'import scipy.stats as stats\n'), ((3981, 4028), 'scipy.stats.norm', 'stats.norm', ([], {'scale': 'rms_regions', 'loc': 'mean_regions'}), '(scale=rms_regions, loc=mean_regions)\n', (3991, 4028), True, 'import scipy.stats as stats\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import shutil
from collections.abc import Iterable
from .nas_utils import find_pareto_front, NASMethods
from .search_algorithms import BayesianOptimizationSearcher, GridSearcher, RandomSearcher
from neural_compressor.conf.config import Conf
from neural_compressor.utils.utility import logger, LazyImport
torch = LazyImport('torch')
class NAS(object):
    """Factory class that dispatches to a concrete NAS approach.

    ``NAS(conf_fname)`` parses the yaml configuration, looks the requested
    approach up in ``NASMethods`` and returns an instance of that approach
    class, falling back to the basic approach when none is configured.
    """
    def __new__(cls, conf_fname, *args, **kwargs):
        if isinstance(conf_fname, str):
            if not os.path.isfile(conf_fname):  # pragma: no cover
                raise NotImplementedError(
                    "Please provide a str path to the config file."
                )
            # __new__ receives the class itself here, so the parsed config
            # is cached as a class attribute.
            cls.conf = Conf(conf_fname).usr_cfg
        assert cls.conf.nas is not None, "nas section must be set"
        approach = cls.conf.nas.approach
        if isinstance(approach, str) and approach.lower() in NASMethods:
            method = approach.lower()
        else:
            logger.warning(
                "NAS approach not set in config, use default NAS approach, i.e. Basic."
            )
            method = 'basic'
        return NASMethods[method](conf_fname, *args, **kwargs)
class NASBase(object):
    """Base class for neural architecture search approaches.

    Drives the search loop: propose an architecture, build a model for it,
    estimate its metrics, feed the result back to the search algorithm, and
    keep the Pareto front of the evaluated architectures.

    Args:
        search_space (dict): A dictionary for defining the search space.
        model_builder (function obj): A function to build model instance with the specified
            model architecture parameters.
    """

    def __init__(self, search_space=None, model_builder=None):
        super().__init__()
        self._search_space = search_space
        self._model_builder = model_builder
        self._search_algorithm = None
        self.search_results = {}    # maps tuple(model_arch values) -> metrics
        self.best_model_archs = None
        self.seed = None

    def select_model_arch(self):
        """Propose architecture of the model based on search algorithm for next search iteration.

        Returns:
            Model architecture description (dict keyed by search_space_keys).
        """
        model_arch_paras = self._search_algorithm.suggest()
        assert self.search_space_keys and isinstance(model_arch_paras, dict) and \
            self.search_space_keys == list(model_arch_paras.keys()), \
            "Keys of model_arch_paras should be the same with search_space_keys."
        return model_arch_paras

    def search(self, res_save_path=None):
        """NAS search process.

        Runs up to max_trials iterations of propose/build/estimate, resuming
        from previously saved results when available, and dumps per-trial and
        final result files under ``<res_save_path>/NASResults``.

        Returns:
            Best model architecture(s) found in search process (Pareto front).
        """
        assert self.model_builder is not None, \
            "Must specify model_builder for generating model instance by model architecture."
        if res_save_path is None or not os.path.isdir(res_save_path):
            res_save_path = os.getcwd()
        save_path = os.path.join(res_save_path, 'NASResults')
        self.model_paras_num = {}
        self.load_search_results(save_path)
        os.makedirs(save_path, exist_ok=True)

        for i in range(self.max_trials):
            logger.info(
                "{fix} Trial {n} starts, {r} trials to go {fix}".format(
                    n=i+1, r=self.max_trials-i-1, fix="="*30
                )
            )
            model_arch_paras = self.select_model_arch()
            logger.info("Model architecture {} proposed.".format(model_arch_paras))
            model = self._model_builder(model_arch_paras)
            model_paras = self.count_model_parameters(model)
            logger.info(
                "***** Number of model parameters: {:.2f}M *****".format(model_paras / 10**6)
            )
            self.model_paras_num[tuple(model_arch_paras.values())] = model_paras
            # skip architectures already evaluated in this run
            if tuple(model_arch_paras.values()) in self.search_results:
                logger.info("Skip evaluated model architecture {}.".format(model_arch_paras))
                continue
            # reuse metrics recovered from a previous (resumed) run
            if tuple(model_arch_paras.values()) in self.resumed_search_results:
                logger.info(
                    "Find previous results of model architecture: {}.".format(model_arch_paras)
                )
                metrics = self.resumed_search_results[tuple(model_arch_paras.values())]
            else:
                logger.info("Assessing model architecture: {}.".format(model_arch_paras))
                metrics = self.estimate(model)
                logger.info(
                    "Metrics of model architecture {} is {}.".format(model_arch_paras, metrics)
                )
            self.search_results[tuple(model_arch_paras.values())] = metrics
            self._search_algorithm.get_feedback(sum(self.metrics_conversion(metrics)))
            self.dump_search_results(
                os.path.join(save_path, 'Trial_{}_results.txt'.format(i+1))
            )

        # merge any resumed results that were never re-proposed this run
        for model_arch_vec in self.resumed_search_results:
            if model_arch_vec not in self.search_results:
                self.search_results[model_arch_vec] = \
                    self.resumed_search_results[model_arch_vec]
                model = self._model_builder(self.params_vec2params_dict(model_arch_vec))
                self.model_paras_num[model_arch_vec] = self.count_model_parameters(model)
        # NOTE(review): 'Final_results.txt'.format(i+1) is a no-op format
        # call (no placeholder in the string)
        self.dump_search_results(os.path.join(save_path, 'Final_results.txt'.format(i+1)))
        self.find_best_model_archs()
        logger.info(
            "{fix} Found {n} best model architectures {fix}".format(
                n=len(self.best_model_archs), fix="="*30
            )
        )
        for i, model_arch in enumerate(self.best_model_archs):
            logger.info("Best model architecture {}: {}".format(i+1, model_arch))
        return self.best_model_archs

    def estimate(self, model):  # pragma: no cover
        """Estimate performance of the model. Depends on specific NAS algorithm.

        Returns:
            Evaluated metrics of the model.
        """
        raise NotImplementedError("Depends on specific NAS algorithm.")

    def count_model_parameters(self, model):
        """Return the total number of parameters of a torch model."""
        if isinstance(model, torch.nn.Module):
            return sum(p.numel() for p in model.parameters())
        else:
            raise NotImplementedError("Only support torch model now.")  # pragma: no cover

    def load_search_results(self, path):
        """Load previously saved search results from ``path`` (if any) into
        ``self.resumed_search_results`` and archive the old files."""
        self.resumed_search_results = {}
        lastest_results_record = os.path.join(path, 'lastest_results.npy')
        if not os.path.exists(path) or not os.path.exists(lastest_results_record):
            return
        self.resumed_search_results = np.load(lastest_results_record, allow_pickle=True).item()
        # move every old file into a 'previous_results' subdirectory
        os.makedirs(os.path.join(path, 'previous_results'), exist_ok=True)
        for f in os.listdir(path):
            if os.path.isfile(os.path.join(path, f)):
                shutil.move(os.path.join(path, f), os.path.join(path, 'previous_results', f))
        logger.info("Loaded previous results.")

    def dump_search_results(self, path):
        """Persist results: a pickled .npy snapshot next to ``path`` plus a
        human-readable text report of all and best results at ``path``."""
        lastest_results_record = os.path.join(os.path.dirname(path), 'lastest_results.npy')
        np.save(lastest_results_record, self.search_results, allow_pickle=True)
        write_contents = '=' * 30 + ' All Search Results ' + '=' * 30 + '\n\n'
        for model_arch_vec in self.search_results:
            tmp = ','.join(['{}_{}'.format(k, v) \
                for k, v in zip(self.search_space_keys, model_arch_vec)])
            write_contents += '{}: {} Paras: {}M\n'.format(
                tmp, self.search_results[model_arch_vec],
                self.model_paras_num[model_arch_vec] / 10**6
            )
        write_contents += '\n\n\n' + '=' * 30 + ' Best Search Results ' + '=' * 30 + '\n\n'
        self.find_best_model_archs()
        for i, model_arch in enumerate(self.best_model_archs):
            model_arch_vec = tuple(model_arch.values())
            tmp = ','.join(['{}_{}'.format(k, v) \
                for k, v in zip(self.search_space_keys, model_arch_vec)])
            write_contents += \
                '{}. {}: {} Paras: {}M\n'.format(
                    i+1, tmp, self.search_results[model_arch_vec],
                    self.model_paras_num[model_arch_vec] / 10**6
                )
        with open(path, mode='w') as f:
            f.write(write_contents)

    def params_vec2params_dict(self, paras_vec):
        """Map a tuple of parameter values back to a dict keyed by
        search_space_keys (inverse of tuple(model_arch_paras.values()))."""
        assert len(paras_vec) == len(self.search_space_keys), \
            "Length of paras_vec and search_space_keys should be the same."
        return {k:v for k, v in zip(self.search_space_keys, paras_vec)}

    def find_best_model_archs(self):
        """Compute the Pareto front of the evaluated architectures and store
        it in ``self.best_model_archs``."""
        assert len(self.search_results) > 0, "Zero result in search_results."
        model_arches = list(self.search_results.keys())
        metrics = [self.metrics_conversion(self.search_results[ma]) for ma in model_arches]
        pareto_front_indices = find_pareto_front(metrics)
        self.best_model_archs = [self.params_vec2params_dict(model_arches[i]) \
            for i in pareto_front_indices]

    def metrics_conversion(self, metrics):
        """Normalize metrics to a list where larger is always better
        (metrics with higher_is_better=False are negated)."""
        if not isinstance(metrics, Iterable):
            metrics = [metrics]
        if isinstance(metrics, dict):
            if self.metrics is None:
                self.metrics = list(metrics.keys())
            assert list(metrics.keys()) == list(self.metrics), \
                "Keys of metrics not match with metrics in the configuration."
            metrics = list(metrics.values())
        if self.higher_is_better is None:
            self.higher_is_better = [True,] * len(metrics)
            logger.warning("higher_is_better not set in the configuration, " + \
                "set it to all True for every metric entry by default.")
        converted_metrics = [metric if higher_is_better else -metric \
            for metric, higher_is_better in zip(metrics, self.higher_is_better)]
        return converted_metrics

    def init_search_cfg(self, config):
        """Initialize search settings (space, metrics, seed, trial budget and
        search algorithm) from the ``search`` section of the config."""
        self.search_cfg = config.search
        # a user-supplied search space takes precedence over the config
        if not self._search_space:
            self._search_space = self.search_cfg.search_space
        else:
            logger.warning(
                "Use user provided search space {}, instead of search space "
                "defined in the config, i.e. {}.".format(
                    self._search_space, self.search_cfg.search_space
                )
            )
        assert isinstance(self._search_space, dict) and len(self._search_space) > 0, \
            "Must provide a dict as search_space for NAS."
        self.search_space_keys = sorted(self.search_space.keys())
        for k in self.search_space_keys:
            assert isinstance(self.search_space[k], (list, tuple)), \
                "Value of key \'{}\' must be a list or tuple".format(k)
        self.metrics = self.search_cfg.metrics \
            if self.search_cfg.metrics else None
        self.higher_is_better = self.search_cfg.higher_is_better \
            if self.search_cfg.higher_is_better else None
        self.seed = self.search_cfg.seed
        self.max_trials = self.search_cfg.max_trials \
            if self.search_cfg.max_trials is not None else 3 # set default 3 for max_trials
        self.search_algorithm_type = self.search_cfg.search_algorithm \
            if self.search_cfg.search_algorithm else None
        # Bayesian optimization is the default search algorithm
        if not self.search_algorithm_type:
            self._search_algorithm = BayesianOptimizationSearcher(self.search_space, self.seed)
        elif self.search_algorithm_type.lower() == 'grid':
            self._search_algorithm = GridSearcher(self.search_space)
        elif self.search_algorithm_type.lower() == 'random':
            self._search_algorithm = RandomSearcher(self.search_space, self.seed)
        elif self.search_algorithm_type.lower() == 'bo':
            self._search_algorithm = BayesianOptimizationSearcher(self.search_space, self.seed)
        else: # pragma: no cover
            raise NotImplementedError(
                'Unsupported \'{}\' search algorithm'.format(self.search_algorithm_type)
            )

    @property
    def search_space(self):
        return self._search_space

    @search_space.setter
    def search_space(self, search_space):
        self._search_space = search_space

    @property
    def search_algorithm(self):
        return self._search_algorithm

    @search_algorithm.setter
    def search_algorithm(self, search_algorithm):
        self._search_algorithm = search_algorithm

    @property
    def model_builder(self):
        return self._model_builder

    @model_builder.setter
    def model_builder(self, model_builder):
        self._model_builder = model_builder

    def __repr__(self):
        return 'Base Class of NAS' # pragma: no cover
"numpy.load",
"numpy.save",
"os.makedirs",
"os.getcwd",
"os.path.isdir",
"neural_compressor.utils.utility.LazyImport",
"neural_compressor.utils.utility.logger.warning",
"os.path.dirname",
"os.path.exists",
"neural_compressor.utils.utility.logger.info",
"os.path.isfile",
"neural_compressor.conf... | [((975, 994), 'neural_compressor.utils.utility.LazyImport', 'LazyImport', (['"""torch"""'], {}), "('torch')\n", (985, 994), False, 'from neural_compressor.utils.utility import logger, LazyImport\n'), ((3368, 3409), 'os.path.join', 'os.path.join', (['res_save_path', '"""NASResults"""'], {}), "(res_save_path, 'NASResults')\n", (3380, 3409), False, 'import os\n'), ((3496, 3533), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (3507, 3533), False, 'import os\n'), ((6870, 6911), 'os.path.join', 'os.path.join', (['path', '"""lastest_results.npy"""'], {}), "(path, 'lastest_results.npy')\n", (6882, 6911), False, 'import os\n'), ((7202, 7218), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7212, 7218), False, 'import os\n'), ((7376, 7415), 'neural_compressor.utils.utility.logger.info', 'logger.info', (['"""Loaded previous results."""'], {}), "('Loaded previous results.')\n", (7387, 7415), False, 'from neural_compressor.utils.utility import logger, LazyImport\n'), ((7558, 7629), 'numpy.save', 'np.save', (['lastest_results_record', 'self.search_results'], {'allow_pickle': '(True)'}), '(lastest_results_record, self.search_results, allow_pickle=True)\n', (7565, 7629), True, 'import numpy as np\n'), ((1123, 1149), 'os.path.isfile', 'os.path.isfile', (['conf_fname'], {}), '(conf_fname)\n', (1137, 1149), False, 'import os\n'), ((1615, 1707), 'neural_compressor.utils.utility.logger.warning', 'logger.warning', (['"""NAS approach not set in config, use default NAS approach, i.e. Basic."""'], {}), "(\n 'NAS approach not set in config, use default NAS approach, i.e. 
Basic.')\n", (1629, 1707), False, 'from neural_compressor.utils.utility import logger, LazyImport\n'), ((3336, 3347), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3345, 3347), False, 'import os\n'), ((7130, 7168), 'os.path.join', 'os.path.join', (['path', '"""previous_results"""'], {}), "(path, 'previous_results')\n", (7142, 7168), False, 'import os\n'), ((7504, 7525), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (7519, 7525), False, 'import os\n'), ((10013, 10140), 'neural_compressor.utils.utility.logger.warning', 'logger.warning', (["('higher_is_better not set in the configuration, ' +\n 'set it to all True for every metric entry by default.')"], {}), "('higher_is_better not set in the configuration, ' +\n 'set it to all True for every metric entry by default.')\n", (10027, 10140), False, 'from neural_compressor.utils.utility import logger, LazyImport\n'), ((3278, 3306), 'os.path.isdir', 'os.path.isdir', (['res_save_path'], {}), '(res_save_path)\n', (3291, 3306), False, 'import os\n'), ((6927, 6947), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6941, 6947), False, 'import os\n'), ((6955, 6993), 'os.path.exists', 'os.path.exists', (['lastest_results_record'], {}), '(lastest_results_record)\n', (6969, 6993), False, 'import os\n'), ((7052, 7102), 'numpy.load', 'np.load', (['lastest_results_record'], {'allow_pickle': '(True)'}), '(lastest_results_record, allow_pickle=True)\n', (7059, 7102), True, 'import numpy as np\n'), ((7250, 7271), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (7262, 7271), False, 'import os\n'), ((1179, 1195), 'neural_compressor.conf.config.Conf', 'Conf', (['conf_fname'], {}), '(conf_fname)\n', (1183, 1195), False, 'from neural_compressor.conf.config import Conf\n'), ((7302, 7323), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (7314, 7323), False, 'import os\n'), ((7325, 7366), 'os.path.join', 'os.path.join', (['path', '"""previous_results"""', 'f'], {}), 
"(path, 'previous_results', f)\n", (7337, 7366), False, 'import os\n')] |
## @ingroupMethods-Noise-Fidelity_One-Airframe
# noise_clean_wing.py
#
# Created: Jun 2015, <NAME>
# Modified: Jan 2016, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
from SUAVE.Core import Units
# ----------------------------------------------------------------------
# Compute the clean wing noise
# ----------------------------------------------------------------------
## @ingroupMethods-Noise-Fidelity_One-Airframe
def noise_clean_wing(S,b,ND,IsHorz,velocity,viscosity,M,phi,theta,distance,frequency):
    """ Compute the 1/3 octave band sound pressure level radiated by a clean
    lifting surface (wing, horizontal tail, or vertical tail).

    Assumptions:
        Correlation based.

    Source:
        SAE Model

    Inputs:
        S          - Surface area                                    [sq.ft]
        b          - Surface span (height for a vertical tail)       [ft]
        ND         - Method constant: 0 for clean wings, 1 otherwise
        IsHorz     - 1 for horizontal surfaces, 0 for a vertical tail
        velocity   - Aircraft speed                                  [kts]
        viscosity  - Dynamic viscosity
        M          - Mach number
        phi        - Azimuthal angle                                 [rad]
        theta      - Polar angle                                     [rad]
        distance   - Observer distance at retarded time              [ft]
        frequency  - Frequency array                                 [Hz]

    Outputs:
        SPL - One third octave band sound pressure level             [dB]

    Properties Used:
        None
    """
    # Turbulent boundary-layer thickness at the trailing edge [ft]
    delta = 0.37*(S/b)*(velocity/Units.ft*S/(b*viscosity))**(-0.2)

    # Directivity factor: horizontal surfaces use cos(phi), vertical sin(phi)
    if IsHorz == 1:
        directivity = np.cos(phi)
    elif IsHorz == 0:
        directivity = np.sin(phi)

    if directivity == 0:
        # Observer lies in the radiation null: no clean-surface noise
        return np.zeros(24)

    # Peak frequency of the spectrum, Doppler-shifted by the flight Mach number
    fmax = 0.1*(velocity/Units.ft)/(delta*(1 - M*np.cos(theta)))

    # Overall SPL assembled term by term (same evaluation order as the
    # single-expression form, so results are bit-identical)
    speed_term = 50*np.log10((velocity/Units.kts)/100.0)
    size_term = 10*np.log10(delta*b/(distance**2.0))
    angle_term = 20*np.log10(directivity*np.sin(theta)*np.cos(theta/2.0))
    OASPL = speed_term + size_term + 8*ND + angle_term + 104.3

    # Spectral shape relative to the peak frequency
    ratio = frequency/fmax
    shape = 10.0*np.log10(0.613*ratio**4*(ratio**1.5 + 0.5)**(-4))
    SPL = OASPL + shape - 0.03*np.abs(ratio - 1)**1.5

    return SPL
| [
"numpy.abs",
"numpy.zeros",
"numpy.sin",
"numpy.cos",
"numpy.log10"
] | [((2524, 2535), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2530, 2535), True, 'import numpy as np\n'), ((2613, 2625), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (2621, 2625), True, 'import numpy as np\n'), ((2570, 2581), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2576, 2581), True, 'import numpy as np\n'), ((2903, 2991), 'numpy.log10', 'np.log10', (['(0.613 * (frequency / fmax) ** 4 * ((frequency / fmax) ** 1.5 + 0.5) ** -4)'], {}), '(0.613 * (frequency / fmax) ** 4 * ((frequency / fmax) ** 1.5 + 0.5\n ) ** -4)\n', (2911, 2991), True, 'import numpy as np\n'), ((2978, 3006), 'numpy.abs', 'np.abs', (['(frequency / fmax - 1)'], {}), '(frequency / fmax - 1)\n', (2984, 3006), True, 'import numpy as np\n'), ((2689, 2702), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2695, 2702), True, 'import numpy as np\n'), ((2726, 2764), 'numpy.log10', 'np.log10', (['(velocity / Units.kts / 100.0)'], {}), '(velocity / Units.kts / 100.0)\n', (2734, 2764), True, 'import numpy as np\n'), ((2766, 2803), 'numpy.log10', 'np.log10', (['(delta * b / distance ** 2.0)'], {}), '(delta * b / distance ** 2.0)\n', (2774, 2803), True, 'import numpy as np\n'), ((2850, 2869), 'numpy.cos', 'np.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (2856, 2869), True, 'import numpy as np\n'), ((2836, 2849), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2842, 2849), True, 'import numpy as np\n')] |
import random
import perimeter
import numpy as np
import unittest
import slice
from util import printBigArray
class PerimeterTest(unittest.TestCase):
    """Unit tests for the perimeter rasterization helpers."""

    def test_lines_to_pixels(self):
        """Rasterizing a closed quadrilateral marks the expected voxels."""
        segments = [[(0, 0, 0), (3, 0, 0)],
                    [(9, 9, 0), (3, 9, 0)],
                    [(3, 0, 0), (9, 9, 0)],
                    [(3, 9, 0), (0, 0, 0)]]
        grid = np.zeros((13, 13), dtype=bool)
        perimeter.linesToVoxels(segments, grid)
        # Expected fill pattern, one string row per grid row (1 = filled)
        rows = [
            '0000000000000',
            '1111000000000',
            '1111111000000',
            '1111111111000',
            '0111111111000',
            '0001111111000',
            '0000111111000',
            '0000001111000',
            '0000000111000',
            '0000000001000',
            '0000000000000',
            '0000000000000',
            '0000000000000',
        ]
        expected = [[int(c) for c in row] for row in rows]
        self.assertEqual(expected, grid.astype(int).tolist())

    def test_cross_line(self):
        """onLine reports whether a lattice point lies on a segment,
        regardless of the segment's endpoint order."""
        self.assertTrue(perimeter.onLine([(0, 0, 0), (2, 2, 0)], 1, 1))
        self.assertTrue(perimeter.onLine([(2, 2, 0), (0, 0, 0)], 1, 1))
        self.assertFalse(perimeter.onLine([(2, 2, 0), (0, 0, 0)], 2, 1))
        self.assertFalse(perimeter.onLine([(2, 2, 0), (0, 0, 0)], 1, 2))
        self.assertTrue(perimeter.onLine([(0, 0, 0), (4, 2, 0)], 2, 1))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"perimeter.linesToVoxels",
"perimeter.onLine",
"numpy.zeros"
] | [((1617, 1632), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1630, 1632), False, 'import unittest\n'), ((365, 395), 'numpy.zeros', 'np.zeros', (['(13, 13)'], {'dtype': 'bool'}), '((13, 13), dtype=bool)\n', (373, 395), True, 'import numpy as np\n'), ((404, 441), 'perimeter.linesToVoxels', 'perimeter.linesToVoxels', (['test', 'actual'], {}), '(test, actual)\n', (427, 441), False, 'import perimeter\n'), ((1281, 1327), 'perimeter.onLine', 'perimeter.onLine', (['[(0, 0, 0), (2, 2, 0)]', '(1)', '(1)'], {}), '([(0, 0, 0), (2, 2, 0)], 1, 1)\n', (1297, 1327), False, 'import perimeter\n'), ((1346, 1392), 'perimeter.onLine', 'perimeter.onLine', (['[(2, 2, 0), (0, 0, 0)]', '(1)', '(1)'], {}), '([(2, 2, 0), (0, 0, 0)], 1, 1)\n', (1362, 1392), False, 'import perimeter\n'), ((1412, 1458), 'perimeter.onLine', 'perimeter.onLine', (['[(2, 2, 0), (0, 0, 0)]', '(2)', '(1)'], {}), '([(2, 2, 0), (0, 0, 0)], 2, 1)\n', (1428, 1458), False, 'import perimeter\n'), ((1478, 1524), 'perimeter.onLine', 'perimeter.onLine', (['[(2, 2, 0), (0, 0, 0)]', '(1)', '(2)'], {}), '([(2, 2, 0), (0, 0, 0)], 1, 2)\n', (1494, 1524), False, 'import perimeter\n'), ((1543, 1589), 'perimeter.onLine', 'perimeter.onLine', (['[(0, 0, 0), (4, 2, 0)]', '(2)', '(1)'], {}), '([(0, 0, 0), (4, 2, 0)], 2, 1)\n', (1559, 1589), False, 'import perimeter\n')] |
# -------------------------------------------------------------------------------------------------
# scientific
import numpy as np
# -------------------------------------------------------------------------------------------------
# system
from math import sqrt
from PyQuantum.Common.html import *
import copy
# -------------------------------------------------------------------------------------------------
# Common
from PyQuantum.Common.Matrix import *
from PyQuantum.Common.Assert import *
from PyQuantum.Common.Print import *
# -------------------------------------------------------------------------------------------------
import html
import pandas as pd
import webbrowser
class Hamiltonian:
# ---------------------------------------------------------------------------------------------
def __init__(self, capacity, cavity):
self.cavity = cavity
self.states = {}
count = 0
self.capacity = capacity
M = capacity
self.n = n = cavity.n
wc = cavity.wc
wa = cavity.wa
g = cavity.g
self.DIME = []
self.H_dims = {}
# ---------------------------------------
for I in range(M, -1, -1):
_min = min(I, n)
dime = (_min + 1) ** 2
self.DIME.append(dime)
# for I in range(M, -1, -1):
# _min = min(I, n)
# count = 0
# for i1 in range(0, _min + 1):
# for i2 in range(0, min(n, I - i1) + 1):
# count += 1
# self.DIME.append(count)
self.size = np.sum(self.DIME)
self.matrix = Matrix(self.size, self.size, dtype=np.complex128)
d = 0
for I in range(M, -1, -1):
i = 1
COUNT = count
for i1 in range(0, min(I, n) + 1):
for i2 in range(0, min(n, I - i1) + 1):
j = 1
self.states[count] = [I - i1 - i2, i1, i2]
for j1 in range(0, min(I, n) + 1):
for j2 in range(0, min(n, I - j1) + 1):
if i1 != j1:
p = [i1, j1]
elif i2 != j2:
p = [i2, j2]
else:
p = [1, 2]
mi = min(p[0], p[1])
kappa = sqrt((n - mi) * (mi + 1))
count += (i == j)
if abs(i1 - j1) + abs(i2 - j2) == 1:
self.matrix.data[
COUNT + i - 1, COUNT + j - 1] = g * sqrt(max(I - i1 - i2, I - j1 - j2)) * kappa
elif abs(i1 - j1) + abs(i2 - j2) == 0:
self.matrix.data[COUNT + i - 1, COUNT + j -
1] = (I - (i1 + i2)) * wc + (i1 + i2) * wa
else:
self.matrix.data[COUNT +
i - 1, COUNT + j - 1] = 0
j += 1
i += 1
self.matrix.data = self.matrix.data[0:count, 0:count]
self.data = self.matrix.data
self.size = np.shape(self.matrix.data)[0]
self.matrix.m = self.matrix.n = self.size
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def get_index(self, state):
for k, v in self.states.items():
if v == state:
return k
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def to_csv(self, filename):
self.matrix.to_csv(filename)
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def get_states(self):
return self.states
# ---------------------------------------------------------------------------------------------
def print_html(self, filename):
f = open(filename, "w")
# html = """ <!DOCTYPE html>
# <html>
# <head>
# <title>
# States
# </title>
# </head>
# <body>
# <table border=1>
# """
# html += "<tr>" html += "<td>" html += "</td>"
# # for i in range(0, len(self.states)):
# # html += "<td>" # html += "[" + str(self.states[i].n1) + "," + str(self.states[i].n2) + "]" # html += "</td>"
# # html += "</tr>"
# # for i in range(0, len(self.states)):
# # html += "<tr>" # html += "<td>" # html += "[" + str(self.states[i].n1) + "," + str(self.states[i].n2) + "]" # html += "</td>"
# # for j in range(0, len(self.states)):
# # html += "<td>"
# # if sqrt:
# # html += "√" + "<span style="text-decoration:overline;">" + str(abs(self.matrix.data[i, j]) / self.g) + "</span>" # else:
# # html += "√" + "<span style="text-decoration:overline;">" + str(abs(self.matrix.data[i, j])) + "</span>"
# # html += "</td>"
# # html += "</tr>"
# html += """ </table>
# </body>
# </html>
# """
# f.write(html)
f.close()
webbrowser.open(filename)
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# def init_states(self):
# self.states = []
# s = St(self.cavity)
# self.states.append(copy.copy(s))
# while(s.inc()):
# self.states.append(copy.copy(s))
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def print_states(self):
print("States:", color="green")
print()
for k, v in self.states.items():
print(v)
print()
# ---------------------------------------------------------------------------------------------
class St:
def __init__(self, cv):
self.capacity = cv.capacity
self.n = cv.n
self.n1 = 0
self.n2 = 0
def inc(self):
if self.n2 < self.n and self.n1 + self.n2 < self.capacity:
self.n2 += 1
else:
self.n2 = 0
if self.n1 < self.n and self.n1 + self.n2 < self.capacity:
self.n1 += 1
else:
return False
return True
def print(self):
print("[" + str(self.n1) + "," + str(self.n2) + "]")
| [
"numpy.shape",
"webbrowser.open",
"numpy.sum",
"math.sqrt"
] | [((1600, 1617), 'numpy.sum', 'np.sum', (['self.DIME'], {}), '(self.DIME)\n', (1606, 1617), True, 'import numpy as np\n'), ((5895, 5920), 'webbrowser.open', 'webbrowser.open', (['filename'], {}), '(filename)\n', (5910, 5920), False, 'import webbrowser\n'), ((3328, 3354), 'numpy.shape', 'np.shape', (['self.matrix.data'], {}), '(self.matrix.data)\n', (3336, 3354), True, 'import numpy as np\n'), ((2438, 2463), 'math.sqrt', 'sqrt', (['((n - mi) * (mi + 1))'], {}), '((n - mi) * (mi + 1))\n', (2442, 2463), False, 'from math import sqrt\n')] |
import sys
import matplotlib.pyplot as plt
import numpy as np
def isReal(txt):
try:
float(txt)
return True
except ValueError:
return False
#dicionários
med_dic={1:{'ID':1,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
2:{'ID':2,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
3:{'ID':3,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
4:{'ID':4,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
5:{'ID':5,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
6:{'ID':6,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
7:{'ID':7,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
8:{'ID':8,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
9:{'ID':9,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}},
10:{'ID':10,'Nome Comercial':0,'Composto principal':0,'Fórmula Química':0,'Meia-vida de eliminação':0,'Número de comprimidos por embalagem':0,'Preço da embalagem em 3 fornecedores':{'A':0,'B':0,'C':0},'Quantidade do composto principal por comprimido':0,'Dose diária para um adulto':0,'As 3 reações adversa mais comuns':{'A':0,'B':0,'C':0}}
}
#listas
nc_list=[]
cp_list=[]
fq_list=[]
mde_list=[]
nce_list=[]
pef_list=[]
mpef_list=[]
qcc_list=[]
dda_list=[]
rac_list=[]
mqcc_list=[]
#input
for i in range(1,11):
while True:
med_dic[i]['Nome Comercial']=(input('Nome Comercial?'))
nc_list.append(med_dic[i]['Nome Comercial'])
nc=med_dic[i]['Nome Comercial']
if nc == "exit":
sys.exit('Parada abrupta do programa!')
elif nc.isalpha():
break
print("Valor inválido!")
while True:
med_dic[i]['Composto principal']=input('Composto principal?')
cp_list.append(med_dic[i]['Composto principal'])
cp=med_dic[i]['Composto principal']
if cp == "exit":
sys.exit('Parada abrupta do programa!')
elif cp.isalpha():
break
print("Valor inválido!")
while True:
med_dic[i]['Fórmula Química']=input('Fórmula Química?')
fq_list.append(med_dic[i]['Fórmula Química'])
fq=med_dic[i]['Fórmula Química']
if fq == "exit":
sys.exit('Parada abrupta do programa!')
elif fq.isalnum():
break
print("Valor inválido!")
while True:
med_dic[i]['Meia-vida de eliminação']=input('Meia-vida de eliminação (horas)?')
mde_list.append(med_dic[i]['Meia-vida de eliminação'])
mde=med_dic[i]['Meia-vida de eliminação']
if mde == "exit":
sys.exit('Parada abrupta do programa!')
elif mde.isdigit():
break
print("Valor inválido!")
while True:
med_dic[i]['Número de comprimidos por embalagem']=input('Número de comprimidos por embalagem?')
nce_list.append(med_dic[i]['Número de comprimidos por embalagem'])
nce=med_dic[i]['Número de comprimidos por embalagem']
if nce == "exit":
sys.exit('Parada abrupta do programa!')
elif nce.isdigit():
break
print("Valor inválido!")
while True:
try:
med_dic[i]['Preço da embalagem em 3 fornecedores']['A']=float(input('Preço da embalagem em 3 fornecedores? Embalagem A: R$'))
a=med_dic[i]['Preço da embalagem em 3 fornecedores']['A']
break
except ValueError:
print("Valor inválido!")
continue
while True:
try:
med_dic[i]['Preço da embalagem em 3 fornecedores']['B']=float(input('Preço da embalagem em 3 fornecedores? Embalagem B: R$'))
b=med_dic[i]['Preço da embalagem em 3 fornecedores']['B']
break
except ValueError:
print("Valor inválido!")
continue
while True:
try:
med_dic[i]['Preço da embalagem em 3 fornecedores']['C']=float(input('Preço da embalagem em 3 fornecedores? Embalagem C: R$'))
c=med_dic[i]['Preço da embalagem em 3 fornecedores']['C']
break
except ValueError:
print("Valor inválido!")
continue
mpef=(a+b+c)/3
mpef_list.append(mpef)
while True:
try:
med_dic[i]['Quantidade do composto principal por comprimido']=float(input('Quantidade do composto principal por comprimido?(em mg)'))
qcc_list.append(med_dic[i]['Quantidade do composto principal por comprimido'])
qcc=med_dic[i]['Quantidade do composto principal por comprimido']
break
except ValueError:
print("Valor inválido!")
continue
mqcc=mpef/qcc
mqcc_list.append(mqcc)
while True:
med_dic[i]['Dose diária para um adulto']=input('Dose diária para um adulto?(em mg)')
dda_list.append(med_dic[i]['Dose diária para um adulto'])
dda=med_dic[i]['Dose diária para um adulto']
if dda == "exit":
sys.exit('Parada abrupta do programa!')
elif isReal(dda):
break
print("Valor inválido!")
while True:
med_dic[i]['As 3 reações adversa mais comuns']['A']=input('As 3 reações adversas mais comuns? Reação A: ')
a=med_dic[i]['As 3 reações adversa mais comuns']['A']
if a == "exit":
sys.exit('Parada abrupta do programa!')
elif a.isalpha():
break
print("Valor inválido!")
while True:
med_dic[i]['As 3 reações adversa mais comuns']['B']=input('As 3 reações adversas mais comuns? Reação B: ')
b=med_dic[i]['As 3 reações adversa mais comuns']['B']
if b == "exit":
sys.exit('Parada abrupta do programa!')
elif b.isalpha():
break
print("Valor inválido!")
while True:
med_dic[i]['As 3 reações adversa mais comuns']['C']=input('As 3 reações adversas mais comuns? Reação C: ')
c=med_dic[i]['As 3 reações adversa mais comuns']['C']
if c == "exit":
sys.exit('Parada abrupta do programa!')
elif c.isalpha():
break
print("Valor inválido!")
rac_list.append(med_dic[i]['As 3 reações adversa mais comuns'])
if i==10:
break
else:
continuar=input("Digite 'p' para parar: ")
if continuar == "p":
break
#gráfico 1
fig, ax = plt.subplots()
index = np.arange(i)
bar_width = 0.15
opacity = 0.8
rects1 = plt.bar(index, mpef_list, bar_width,
alpha=opacity,
color='r',
label='Média de Preço')
plt.xlabel('Medicamentos')
plt.ylabel('Reais(R$)')
plt.title('Valores por Medicamento')
plt.xticks(index, (nc_list))
plt.legend()
plt.tight_layout()
plt.show()
#gráfico 2
fig1, ax1 = plt.subplots()
index = np.arange(i)
bar_width = 0.15
opacity = 0.8
rects1 = plt.bar(index, mqcc_list, bar_width,
alpha=opacity,
color='r',
label='Média por 1 mg')
plt.xlabel('Medicamentos')
plt.ylabel('Reais(R$)')
plt.title('Média de Preço por Miligramas do Componente Principal')
plt.xticks(index, (nc_list))
plt.legend()
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"sys.exit"
] | [((9062, 9076), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9074, 9076), True, 'import matplotlib.pyplot as plt\n'), ((9086, 9098), 'numpy.arange', 'np.arange', (['i'], {}), '(i)\n', (9095, 9098), True, 'import numpy as np\n'), ((9144, 9235), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'mpef_list', 'bar_width'], {'alpha': 'opacity', 'color': '"""r"""', 'label': '"""Média de Preço"""'}), "(index, mpef_list, bar_width, alpha=opacity, color='r', label=\n 'Média de Preço')\n", (9151, 9235), True, 'import matplotlib.pyplot as plt\n'), ((9288, 9314), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Medicamentos"""'], {}), "('Medicamentos')\n", (9298, 9314), True, 'import matplotlib.pyplot as plt\n'), ((9316, 9339), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reais(R$)"""'], {}), "('Reais(R$)')\n", (9326, 9339), True, 'import matplotlib.pyplot as plt\n'), ((9341, 9377), 'matplotlib.pyplot.title', 'plt.title', (['"""Valores por Medicamento"""'], {}), "('Valores por Medicamento')\n", (9350, 9377), True, 'import matplotlib.pyplot as plt\n'), ((9379, 9405), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index', 'nc_list'], {}), '(index, nc_list)\n', (9389, 9405), True, 'import matplotlib.pyplot as plt\n'), ((9409, 9421), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9419, 9421), True, 'import matplotlib.pyplot as plt\n'), ((9426, 9444), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9442, 9444), True, 'import matplotlib.pyplot as plt\n'), ((9446, 9456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9454, 9456), True, 'import matplotlib.pyplot as plt\n'), ((9486, 9500), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9498, 9500), True, 'import matplotlib.pyplot as plt\n'), ((9510, 9522), 'numpy.arange', 'np.arange', (['i'], {}), '(i)\n', (9519, 9522), True, 'import numpy as np\n'), ((9568, 9659), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'mqcc_list', 'bar_width'], {'alpha': 
'opacity', 'color': '"""r"""', 'label': '"""Média por 1 mg"""'}), "(index, mqcc_list, bar_width, alpha=opacity, color='r', label=\n 'Média por 1 mg')\n", (9575, 9659), True, 'import matplotlib.pyplot as plt\n'), ((9712, 9738), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Medicamentos"""'], {}), "('Medicamentos')\n", (9722, 9738), True, 'import matplotlib.pyplot as plt\n'), ((9740, 9763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reais(R$)"""'], {}), "('Reais(R$)')\n", (9750, 9763), True, 'import matplotlib.pyplot as plt\n'), ((9765, 9831), 'matplotlib.pyplot.title', 'plt.title', (['"""Média de Preço por Miligramas do Componente Principal"""'], {}), "('Média de Preço por Miligramas do Componente Principal')\n", (9774, 9831), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9859), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index', 'nc_list'], {}), '(index, nc_list)\n', (9843, 9859), True, 'import matplotlib.pyplot as plt\n'), ((9863, 9875), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9873, 9875), True, 'import matplotlib.pyplot as plt\n'), ((9880, 9898), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9896, 9898), True, 'import matplotlib.pyplot as plt\n'), ((9900, 9910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9908, 9910), True, 'import matplotlib.pyplot as plt\n'), ((4028, 4067), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (4036, 4067), False, 'import sys\n'), ((4385, 4424), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (4393, 4424), False, 'import sys\n'), ((4730, 4769), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (4738, 4769), False, 'import sys\n'), ((5118, 5157), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (5126, 5157), False, 'import sys\n'), 
((5551, 5590), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (5559, 5590), False, 'import sys\n'), ((7610, 7649), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (7618, 7649), False, 'import sys\n'), ((7970, 8009), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (7978, 8009), False, 'import sys\n'), ((8324, 8363), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (8332, 8363), False, 'import sys\n'), ((8678, 8717), 'sys.exit', 'sys.exit', (['"""Parada abrupta do programa!"""'], {}), "('Parada abrupta do programa!')\n", (8686, 8717), False, 'import sys\n')] |
"""Module that allows calibrating image matrices using calibration vectors.
The calibration formula is Γ² = γ * α² where Γ is the calibrated
matrix, γ is the uncorrected matrix, and α is the calibration vector.
Note that α and γ must share the same dimension (if the matrix of
pixels is m by n , then α must be length m).
If the length of α is less than m, it means that the calibration vector
vector is subsampled, and needs to be interpolated before it is
applied.
"""
import json
import numpy as np
version = "1"
def apply_calibration(calibration_filename, image_matrix):
"""Apply the provided calibration vector to the provided image matrix.
Parameters
----------
calibration_filename : string
The path of the json file that describes the calibration vector.
The expected format of this file is the following:
{
"calibration_info":
{
"calibration_spacing": int,
"calibration_vector": float[]
}
}
image_matrix : 2D numpy array
The image matrix to calibrate.
Returns
-------
Result : 2D numpy array
The calibrated image matrix.
"""
calibration_vector = []
calibration_spacing = 1
with open(calibration_filename, "r") as f:
calibration_vector, calibration_spacing = _read_calibration_info(f)
target_vector_size = image_matrix.shape[1]
interpolated_vector = _interpolate_vector(calibration_vector,
calibration_spacing,
target_vector_size)
return _calibrate_matrix(image_matrix, interpolated_vector)
def _read_calibration_info(fd):
"""Read calibration information from the provided stream.
Parameters
----------
fd : IO stream
The stream should contain a json serialization of the
calibration vector information.
The expected format of this information is the following:
{
"calibration_info":
{
"calibration_spacing": int,
"calibration_vector": float[]
}
}
Returns
-------
Result : tuple
(calibration_vector, calibration_spacing).
"""
calibration_data = json.load(fd)
calibration_vector = (calibration_data["calibration_info"]
["calibration_vector"])
calibration_spacing = (calibration_data["calibration_info"]
["calibration_spacing"])
return (calibration_vector, calibration_spacing)
def _calibrate_matrix(matrix, vector):
"""Apply the calibration vector on the provided matrix.
Parameters
----------
matrix : numpy 2D array
The intial image matrix.
vector : numpy 1D array
The compensation vector to apply.
Returns
-------
Result : 2D numpy array
The new matrix, with the caliration vector applied.
"""
return np.sqrt(matrix * vector**2)
def _interpolate_vector(vector, spacing, target_size):
"""Interpolate the vector into a new one with the target size.
Parameters
----------
vector : array-like
The vector to interpolate from.
spacing: int
Distance between samples in the original vector.
spacing = 1 if the vector is not subsampled and does not need
to be interpolated.
spacing > 1 otherwise.
target_size: int
The size of the interpolated vector.
Returns
-------
Result : numpy array
The interpolated vector.
"""
return np.interp(range(target_size),
range(0, target_size, spacing),
vector)
| [
"json.load",
"numpy.sqrt"
] | [((2299, 2312), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (2308, 2312), False, 'import json\n'), ((3024, 3053), 'numpy.sqrt', 'np.sqrt', (['(matrix * vector ** 2)'], {}), '(matrix * vector ** 2)\n', (3031, 3053), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import time
from imageLoader import getPaddedROI,training_data_feeder
import math
'''
created by <NAME>
a sub-model for human pose estimation
'''
#input data feeder !!! important !!! The hintSetx_norm_batches are 5d tensors!!! To accommodate, the batch size are fixed to 2
def get_train_data(batch_size):
#load image from dataset(train set)
joint_data_path = "./custom_data.json"
train_val_path = "./train_val_indices.json"
imgpath = "./000/"
input_size = 301
hint_roi_size = 23
hintSet01_norm_batch = []
hintSet02_norm_batch = []
t_img_batch = []
t_label_norm_batch = []
#load data
for i in range(batch_size):
hintSet01,hintSet02,t_img,t_label_norm = training_data_feeder(joint_data_path, train_val_path, imgpath, input_size, hint_roi_size )
#Normalize the image pixel values to 0~1
hintSet01_norm = []
hintSet02_norm = []
t_img = np.float32(t_img /255.0)
#print(type(t_label_norm))
for rois in hintSet01:
tmp = np.float32(rois / 255.0)
hintSet01_norm.append(tmp.tolist())
for rois in hintSet02:
tmp = np.float32(rois / 255.0)
hintSet02_norm.append(tmp.tolist())
hintSet01_norm_batch.append(hintSet01_norm)
hintSet02_norm_batch.append(hintSet02_norm)
t_img_batch.append(t_img)
t_label_norm_batch.append( t_label_norm)
return hintSet01_norm_batch, hintSet02_norm_batch, t_img_batch, t_label_norm_batch
# locate minimum value Position( the shortest Hamming distance)
def locateMin_and_get_loss( distance_map ,original_map_size, label ):
#locate the minimum value position
tmin = tf.argmin( tf.reshape(distance_map,[-1]), output_type=tf.int32)
#!!!!!must notice!!!! The divisor to normalize the final position was set manually (now as 76)
#It was because I cannot get the desired result with "original_map_size"
pos = tf.cast( ((tf.floormod(tmin, original_map_size) +1 ) / 76 , (tf.floordiv(tmin , original_map_size) +1 )/ 76),tf.float32)
dist =tf.abs( tf.norm(label - pos, ord='euclidean'))
return dist,pos
def get_total_loss_and_result( out_2_l3 , out_h2_l3 , out_h1_l3 , orig_feature_map_size , labels ):
dist0,pos0 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[0] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[0] )) , axis=2 ) ) , orig_feature_map_size, labels[0])
dist1,pos1 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[1] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[1] )) , axis=2 ) ) , orig_feature_map_size, labels[1])
dist2,pos2 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[2] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[2] )) , axis=2 ) ) , orig_feature_map_size, labels[2])
dist3,pos3 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[3] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[3] )) , axis=2 ) ) , orig_feature_map_size, labels[3])
dist4,pos4 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[4] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[4] )) , axis=2 ) ) , orig_feature_map_size, labels[4])
dist5,pos5 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[5] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[5] )) , axis=2 ) ) , orig_feature_map_size, labels[5])
dist6,pos6 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[6] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[6] )) , axis=2 ) ) , orig_feature_map_size, labels[6])
dist7,pos7 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[7] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[7] )) , axis=2 ) ) , orig_feature_map_size, labels[7])
dist8,pos8 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[8] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[8] )) , axis=2 ) ) , orig_feature_map_size, labels[8])
dist9,pos9 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[9] )) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[9] )) , axis=2 ) ) , orig_feature_map_size, labels[9])
dist10,pos10 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[10])) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[10])) , axis=2 ) ) , orig_feature_map_size, labels[10])
dist11,pos11 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[11])) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[11])) , axis=2 ) ) , orig_feature_map_size, labels[11])
dist12,pos12 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[12])) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[12])) , axis=2 ) ) , orig_feature_map_size, labels[12])
dist13,pos13 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[13])) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[13])) , axis=2 ) ) , orig_feature_map_size, labels[13])
dist14,pos14 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[14])) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[14])) , axis=2 ) ) , orig_feature_map_size, labels[14])
dist15,pos15 = locateMin_and_get_loss( tf.minimum( tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h2_l3[15])) , axis=2 ),
tf.reduce_sum (tf.abs(tf.subtract( out_2_l3, out_h1_l3[15])) , axis=2 ) ) , orig_feature_map_size, labels[15])
#total_loss =tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( tf.add_n( (tf.add_n( tf.add_n( tf.add_n(dist0 , dist1) , dist2) , dist3) , dist4 ), dist5 ), dist6 ), dist7 ), dist8) , dist9) , dist10) , dist11) , dist12 ), dist13 ), dist14 ), dist15)
total_loss = tf.stack([dist0 , dist1 , dist2 , dist3 , dist4 , dist5 , dist6 , dist7 ,
dist8 , dist9 , dist10 , dist11 , dist12 , dist13 , dist14 , dist15], axis=0)
total_loss = tf.reduce_sum( total_loss )
final_output = tf.stack([pos0, pos1, pos2, pos3, pos4, pos5, pos6, pos7
,pos8 ,pos9 ,pos10 ,pos11 ,pos12 ,pos13 ,pos14,pos15], axis = 0)
return total_loss, final_output
#function to locate the point with maximum value on similarity map
def max_sim_point( sim_map , orig_feature_map_size, image_input_size ):
#get the position of max value
max_pos = tf.argmax( sim_map , output_type=tf.int32)
max_value = sim_map[max_pos]
#reshape the similarity map to 2d format
sim_map= tf.reshape(sim_map,[orig_feature_map_size , orig_feature_map_size])
p = tf.where (tf.equal (sim_map,max_value ) )
def cond_t(p):
return p[0]
def cond_f(p):
return p
joint = tf.cond(tf.shape(p)[0] > 1 , lambda:cond_t(p) , lambda:cond_f(p) )
x = tf.cast(joint[0], tf.float32) * tf.cast((image_input_size/orig_feature_map_size),tf.float32)
y = tf.cast(joint[1], tf.float32) * tf.cast((image_input_size/orig_feature_map_size),tf.float32)
return ( x , y )
def truncated_normal_var(name,shape,dtype):
return(tf.get_variable(name=name, shape=shape, dtype=dtype, initializer=tf.truncated_normal_initializer(stddev=0.01)))
def zero_var(name,shape,dtype):
return(tf.get_variable(name=name, shape=shape, dtype=dtype, initializer=tf.constant_initializer(0.0)))
# Side length (pixels) of each square hint ROI crop fed to the network.
roi_size = 23
# Side length (pixels) of the full search image.
image_input_size = 301
#input placeholders
# Each hint placeholder holds 16 ROI crops per batch element -- presumably one
# per body joint/keypoint; confirm against the data loader.
#batch1 hints
inputs_b1h1 = tf.placeholder(tf.float32, ( 16, roi_size, roi_size, 3), name='inputs_b1h1')
inputs_b1h2 = tf.placeholder(tf.float32, ( 16, roi_size, roi_size, 3), name='inputs_b1h2')
#batch2 hints
inputs_b2h1 = tf.placeholder(tf.float32, ( 16, roi_size, roi_size, 3), name='inputs_b2h1')
inputs_b2h2 = tf.placeholder(tf.float32, ( 16, roi_size, roi_size, 3), name='inputs_b2h2')
#batch3 hints
inputs_b3h1 = tf.placeholder(tf.float32, ( 16, roi_size, roi_size, 3), name='inputs_b3h1')
inputs_b3h2 = tf.placeholder(tf.float32, ( 16, roi_size, roi_size, 3), name='inputs_b3h2')
# Full search images, variable batch size.
inputs_s = tf.placeholder(tf.float32, (None, image_input_size, image_input_size, 3), name='inputs_s')
# Ground-truth (x, y) coordinates, 16 points per batch element.
labels = tf.placeholder(tf.float32,(None,16,2), name='labels')
def paraNet(input):
    """Shared three-layer convolutional encoder producing a 32-channel feature map.

    Layers (all 'valid' padding): 3x3 stride-2 conv -> relu6, 3x3 stride-2 conv
    -> relu6, 5x5 stride-1 conv.  The layer names are fixed so the weights can
    be shared across variable scopes with reuse=True.
    """
    hidden = tf.nn.relu6(tf.layers.conv2d(input, 8, [3, 3], strides=(2, 2), padding='valid', name='para_conv_1'))
    hidden = tf.nn.relu6(tf.layers.conv2d(hidden, 16, [3, 3], strides=(2, 2), padding='valid', name='para_conv_2'))
    return tf.layers.conv2d(hidden, 32, [5, 5], strides=(1, 1), padding='valid', name='para_conv_3')
#network pipeline to create the first Hint Hash Sets (Three batches)
# The scope 'conv' is created once here; every later scope opens it with
# reuse=True, so all six hint branches and the search branch share the same
# paraNet weights.
with tf.variable_scope('conv'):
    out_b1h1_l3 = paraNet(inputs_b1h1)
    #flatten and binarize the hashes
    out_b1h1_l3 =tf.squeeze( tf.cast(tf.sigmoid(out_b1h1_l3), tf.int32))
with tf.variable_scope('conv', reuse=True):
    out_b2h1_l3 = paraNet(inputs_b2h1)
    #flatten and binarize the hashes
    out_b2h1_l3 =tf.squeeze( tf.cast(tf.sigmoid(out_b2h1_l3), tf.int32))
with tf.variable_scope('conv', reuse=True):
    out_b3h1_l3 = paraNet(inputs_b3h1)
    #flatten and binarize the hashes
    out_b3h1_l3 =tf.squeeze( tf.cast(tf.sigmoid(out_b3h1_l3), tf.int32))
#concatenate hint Hash from the 3 batches
#out_h1_l3 = tf.stack([out_b1h1_l3 , out_b2h1_l3 , out_b3h1_l3])
#network pipeline to create the Second Hint Hash Sets
with tf.variable_scope('conv', reuse=True):
    out_b1h2_l3 = paraNet(inputs_b1h2)
    #flatten and binarize the hashes
    out_b1h2_l3 =tf.squeeze( tf.cast(tf.sigmoid(out_b1h2_l3), tf.int32))
with tf.variable_scope('conv', reuse=True):
    out_b2h2_l3 = paraNet(inputs_b2h2)
    #flatten and binarize the hashes
    out_b2h2_l3 =tf.squeeze( tf.cast(tf.sigmoid(out_b2h2_l3), tf.int32))
with tf.variable_scope('conv', reuse=True):
    out_b3h2_l3 = paraNet(inputs_b3h2)
    #flatten and binarize the hashes
    out_b3h2_l3 =tf.squeeze( tf.cast(tf.sigmoid(out_b3h2_l3), tf.int32))
#concatenate hint Hash from the 3 batches
#out_h2_l3 = tf.stack([out_b1h2_l3 , out_b2h2_l3 , out_b3h2_l3])
# Search branch: same shared weights, but note the padding is 'same' here
# while paraNet uses 'valid', so the search feature map keeps a larger
# spatial size than the hint hashes.
with tf.variable_scope('conv', reuse=True):
    out_2_l1 = tf.layers.conv2d(inputs_s, 8, [3, 3],strides=(2, 2), padding ='same' ,name='para_conv_1')
    out_2_l1 = tf.nn.relu6(out_2_l1)
    out_2_l2 = tf.layers.conv2d(out_2_l1, 16, [3, 3],strides=(2, 2), padding ='same' ,name='para_conv_2')
    out_2_l2 = tf.nn.relu6(out_2_l2)
    out_2_l3 = tf.layers.conv2d(out_2_l2, 32, [5, 5],strides=(1, 1), padding ='same' ,name='para_conv_3')
    #binarize the value
    out_2_l3 = tf.cast(tf.sigmoid(out_2_l3), tf.int32)
#iterate through each pixel of the final feature map
#turn the value into hashes
orig_feature_map_size = tf.shape(out_2_l3)[1]
# get_total_loss_and_result is defined earlier in this file (not visible in
# this chunk); one call per batch element.
loss_batch1, result_batch1 = get_total_loss_and_result(out_2_l3[0] , out_b1h2_l3 , out_b1h1_l3 , orig_feature_map_size , labels[0])
loss_batch2, result_batch2 = get_total_loss_and_result(out_2_l3[1] , out_b2h2_l3 , out_b2h1_l3 , orig_feature_map_size , labels[1])
loss_batch3, result_batch3 = get_total_loss_and_result(out_2_l3[2] , out_b3h2_l3 , out_b3h1_l3 , orig_feature_map_size , labels[2])
loss_of_all_batches = tf.stack([loss_batch1 , loss_batch2 , loss_batch3], axis =0)
#ValueError: No gradients provided for any variable, check your graph for ops that do not support gradients, between variables
# NOTE(review): the error noted above is plausibly caused by the hard
# binarization -- tf.cast(..., tf.int32) on the sigmoid outputs (and
# argmax/where inside the loss) are non-differentiable, so no gradient can
# reach the conv weights.  TODO confirm and replace with a differentiable
# surrogate before training.
train_step = tf.train.GradientDescentOptimizer(0.01).minimize( loss_of_all_batches ,var_list=tf.trainable_variables())
init = tf.global_variables_initializer()
batchsize = 3
with tf.Session() as sess:
    # Dump the graph for TensorBoard inspection (side effect only; the writer
    # handle is not used again afterwards).
    writer = tf.summary.FileWriter("./variable_graph",graph = sess.graph)
    sess.run(init)
    print(tf.trainable_variables())
    #print(hash_set01)
    #print(out_2_l3)
    # get_train_data is defined earlier in the file (not visible in this chunk).
    hintSet01_norm, hintSet02_norm, t_img, t_label_norm = get_train_data(batchsize)
    # BUG FIX: `temp` was printed below without ever being assigned, which
    # raised a NameError.  Capture the value returned by sess.run instead
    # (for a bare train op this is None, so the prints stay harmless).
    temp = sess.run(train_step, feed_dict={inputs_s: t_img,
                                          inputs_b1h1: hintSet01_norm[0], inputs_b1h2: hintSet02_norm[0], #batch no.1
                                          inputs_b2h1: hintSet01_norm[1], inputs_b2h2: hintSet02_norm[1], #batch no.2
                                          inputs_b3h1: hintSet01_norm[2], inputs_b3h2: hintSet02_norm[2], #batch no.3
                                          labels: t_label_norm
                                          })
    print(temp)
    print(np.shape(temp))
    '''
    for i in range(1000):
        hintSet01_norm, hintSet02_norm, t_img, t_label_norm = get_train_data(batchsize)
        sess.run(train_step, feed_dict={inputs_s: [ t_img ] ,
                                        inputs_b1h1: hintSet01_norm[0], inputs_b1h2: hintSet02_norm[0], #batch no.1
                                        inputs_b2h1: hintSet01_norm[1], inputs_b2h2: hintSet02_norm[1], #batch no.2
                                        inputs_b3h1: hintSet01_norm[2], inputs_b3h2: hintSet02_norm[2], #batch no.3
                                        labels: t_label_norm
                                        })
        if i % 50 == 0:
            print(total_loss)
    '''
| [
"tensorflow.reduce_sum",
"tensorflow.trainable_variables",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"numpy.shape",
"tensorflow.nn.relu6",
"tensorflow.subtract",
"tensorflow.variable_scope",
"tensorflow.stack",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.summary.Fil... | [((8997, 9072), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(16, roi_size, roi_size, 3)'], {'name': '"""inputs_b1h1"""'}), "(tf.float32, (16, roi_size, roi_size, 3), name='inputs_b1h1')\n", (9011, 9072), True, 'import tensorflow as tf\n'), ((9088, 9163), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(16, roi_size, roi_size, 3)'], {'name': '"""inputs_b1h2"""'}), "(tf.float32, (16, roi_size, roi_size, 3), name='inputs_b1h2')\n", (9102, 9163), True, 'import tensorflow as tf\n'), ((9193, 9268), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(16, roi_size, roi_size, 3)'], {'name': '"""inputs_b2h1"""'}), "(tf.float32, (16, roi_size, roi_size, 3), name='inputs_b2h1')\n", (9207, 9268), True, 'import tensorflow as tf\n'), ((9284, 9359), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(16, roi_size, roi_size, 3)'], {'name': '"""inputs_b2h2"""'}), "(tf.float32, (16, roi_size, roi_size, 3), name='inputs_b2h2')\n", (9298, 9359), True, 'import tensorflow as tf\n'), ((9389, 9464), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(16, roi_size, roi_size, 3)'], {'name': '"""inputs_b3h1"""'}), "(tf.float32, (16, roi_size, roi_size, 3), name='inputs_b3h1')\n", (9403, 9464), True, 'import tensorflow as tf\n'), ((9480, 9555), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(16, roi_size, roi_size, 3)'], {'name': '"""inputs_b3h2"""'}), "(tf.float32, (16, roi_size, roi_size, 3), name='inputs_b3h2')\n", (9494, 9555), True, 'import tensorflow as tf\n'), ((9569, 9663), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, image_input_size, image_input_size, 3)'], {'name': '"""inputs_s"""'}), "(tf.float32, (None, image_input_size, image_input_size, 3),\n name='inputs_s')\n", (9583, 9663), True, 'import tensorflow as tf\n'), ((9669, 9725), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 16, 2)'], {'name': '"""labels"""'}), "(tf.float32, (None, 16, 
2), name='labels')\n", (9683, 9725), True, 'import tensorflow as tf\n'), ((13074, 13107), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13105, 13107), True, 'import tensorflow as tf\n'), ((7342, 7482), 'tensorflow.stack', 'tf.stack', (['[dist0, dist1, dist2, dist3, dist4, dist5, dist6, dist7, dist8, dist9,\n dist10, dist11, dist12, dist13, dist14, dist15]'], {'axis': '(0)'}), '([dist0, dist1, dist2, dist3, dist4, dist5, dist6, dist7, dist8,\n dist9, dist10, dist11, dist12, dist13, dist14, dist15], axis=0)\n', (7350, 7482), True, 'import tensorflow as tf\n'), ((7538, 7563), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['total_loss'], {}), '(total_loss)\n', (7551, 7563), True, 'import tensorflow as tf\n'), ((7585, 7709), 'tensorflow.stack', 'tf.stack', (['[pos0, pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9, pos10, pos11,\n pos12, pos13, pos14, pos15]'], {'axis': '(0)'}), '([pos0, pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9, pos10,\n pos11, pos12, pos13, pos14, pos15], axis=0)\n', (7593, 7709), True, 'import tensorflow as tf\n'), ((7966, 8006), 'tensorflow.argmax', 'tf.argmax', (['sim_map'], {'output_type': 'tf.int32'}), '(sim_map, output_type=tf.int32)\n', (7975, 8006), True, 'import tensorflow as tf\n'), ((8105, 8172), 'tensorflow.reshape', 'tf.reshape', (['sim_map', '[orig_feature_map_size, orig_feature_map_size]'], {}), '(sim_map, [orig_feature_map_size, orig_feature_map_size])\n', (8115, 8172), True, 'import tensorflow as tf\n'), ((9775, 9867), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input', '(8)', '[3, 3]'], {'strides': '(2, 2)', 'padding': '"""valid"""', 'name': '"""para_conv_1"""'}), "(input, 8, [3, 3], strides=(2, 2), padding='valid', name=\n 'para_conv_1')\n", (9791, 9867), True, 'import tensorflow as tf\n'), ((9876, 9895), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['out_l1'], {}), '(out_l1)\n', (9887, 9895), True, 'import tensorflow as tf\n'), ((9909, 10003), 
'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['out_l1', '(16)', '[3, 3]'], {'strides': '(2, 2)', 'padding': '"""valid"""', 'name': '"""para_conv_2"""'}), "(out_l1, 16, [3, 3], strides=(2, 2), padding='valid', name=\n 'para_conv_2')\n", (9925, 10003), True, 'import tensorflow as tf\n'), ((10012, 10031), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['out_l2'], {}), '(out_l2)\n', (10023, 10031), True, 'import tensorflow as tf\n'), ((10045, 10139), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['out_l2', '(32)', '[5, 5]'], {'strides': '(1, 1)', 'padding': '"""valid"""', 'name': '"""para_conv_3"""'}), "(out_l2, 32, [5, 5], strides=(1, 1), padding='valid', name=\n 'para_conv_3')\n", (10061, 10139), True, 'import tensorflow as tf\n'), ((10228, 10253), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {}), "('conv')\n", (10245, 10253), True, 'import tensorflow as tf\n'), ((10409, 10446), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (10426, 10446), True, 'import tensorflow as tf\n'), ((10602, 10639), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (10619, 10639), True, 'import tensorflow as tf\n'), ((10970, 11007), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (10987, 11007), True, 'import tensorflow as tf\n'), ((11163, 11200), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (11180, 11200), True, 'import tensorflow as tf\n'), ((11356, 11393), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (11373, 11393), True, 'import tensorflow as tf\n'), ((11664, 11701), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (11681, 11701), True, 'import tensorflow as tf\n'), 
((11718, 11812), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['inputs_s', '(8)', '[3, 3]'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""para_conv_1"""'}), "(inputs_s, 8, [3, 3], strides=(2, 2), padding='same', name=\n 'para_conv_1')\n", (11734, 11812), True, 'import tensorflow as tf\n'), ((11824, 11845), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['out_2_l1'], {}), '(out_2_l1)\n', (11835, 11845), True, 'import tensorflow as tf\n'), ((11861, 11956), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['out_2_l1', '(16)', '[3, 3]'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""para_conv_2"""'}), "(out_2_l1, 16, [3, 3], strides=(2, 2), padding='same', name\n ='para_conv_2')\n", (11877, 11956), True, 'import tensorflow as tf\n'), ((11967, 11988), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['out_2_l2'], {}), '(out_2_l2)\n', (11978, 11988), True, 'import tensorflow as tf\n'), ((12004, 12099), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['out_2_l2', '(32)', '[5, 5]'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""para_conv_3"""'}), "(out_2_l2, 32, [5, 5], strides=(1, 1), padding='same', name\n ='para_conv_3')\n", (12020, 12099), True, 'import tensorflow as tf\n'), ((12750, 12807), 'tensorflow.stack', 'tf.stack', (['[loss_batch1, loss_batch2, loss_batch3]'], {'axis': '(0)'}), '([loss_batch1, loss_batch2, loss_batch3], axis=0)\n', (12758, 12807), True, 'import tensorflow as tf\n'), ((13128, 13140), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13138, 13140), True, 'import tensorflow as tf\n'), ((13163, 13222), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./variable_graph"""'], {'graph': 'sess.graph'}), "('./variable_graph', graph=sess.graph)\n", (13184, 13222), True, 'import tensorflow as tf\n'), ((757, 850), 'imageLoader.training_data_feeder', 'training_data_feeder', (['joint_data_path', 'train_val_path', 'imgpath', 'input_size', 'hint_roi_size'], {}), '(joint_data_path, train_val_path, imgpath, 
input_size,\n hint_roi_size)\n', (777, 850), False, 'from imageLoader import getPaddedROI, training_data_feeder\n'), ((970, 995), 'numpy.float32', 'np.float32', (['(t_img / 255.0)'], {}), '(t_img / 255.0)\n', (980, 995), True, 'import numpy as np\n'), ((1749, 1779), 'tensorflow.reshape', 'tf.reshape', (['distance_map', '[-1]'], {}), '(distance_map, [-1])\n', (1759, 1779), True, 'import tensorflow as tf\n'), ((2132, 2169), 'tensorflow.norm', 'tf.norm', (['(label - pos)'], {'ord': '"""euclidean"""'}), "(label - pos, ord='euclidean')\n", (2139, 2169), True, 'import tensorflow as tf\n'), ((8191, 8219), 'tensorflow.equal', 'tf.equal', (['sim_map', 'max_value'], {}), '(sim_map, max_value)\n', (8199, 8219), True, 'import tensorflow as tf\n'), ((8388, 8417), 'tensorflow.cast', 'tf.cast', (['joint[0]', 'tf.float32'], {}), '(joint[0], tf.float32)\n', (8395, 8417), True, 'import tensorflow as tf\n'), ((8420, 8481), 'tensorflow.cast', 'tf.cast', (['(image_input_size / orig_feature_map_size)', 'tf.float32'], {}), '(image_input_size / orig_feature_map_size, tf.float32)\n', (8427, 8481), True, 'import tensorflow as tf\n'), ((8489, 8518), 'tensorflow.cast', 'tf.cast', (['joint[1]', 'tf.float32'], {}), '(joint[1], tf.float32)\n', (8496, 8518), True, 'import tensorflow as tf\n'), ((8521, 8582), 'tensorflow.cast', 'tf.cast', (['(image_input_size / orig_feature_map_size)', 'tf.float32'], {}), '(image_input_size / orig_feature_map_size, tf.float32)\n', (8528, 8582), True, 'import tensorflow as tf\n'), ((12142, 12162), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_2_l3'], {}), '(out_2_l3)\n', (12152, 12162), True, 'import tensorflow as tf\n'), ((12293, 12311), 'tensorflow.shape', 'tf.shape', (['out_2_l3'], {}), '(out_2_l3)\n', (12301, 12311), True, 'import tensorflow as tf\n'), ((13258, 13282), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13280, 13282), True, 'import tensorflow as tf\n'), ((13971, 13985), 'numpy.shape', 'np.shape', (['temp'], {}), 
'(temp)\n', (13979, 13985), True, 'import numpy as np\n'), ((1079, 1103), 'numpy.float32', 'np.float32', (['(rois / 255.0)'], {}), '(rois / 255.0)\n', (1089, 1103), True, 'import numpy as np\n'), ((1201, 1225), 'numpy.float32', 'np.float32', (['(rois / 255.0)'], {}), '(rois / 255.0)\n', (1211, 1225), True, 'import numpy as np\n'), ((8724, 8768), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (8755, 8768), True, 'import tensorflow as tf\n'), ((8879, 8907), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (8902, 8907), True, 'import tensorflow as tf\n'), ((10367, 10390), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_b1h1_l3'], {}), '(out_b1h1_l3)\n', (10377, 10390), True, 'import tensorflow as tf\n'), ((10560, 10583), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_b2h1_l3'], {}), '(out_b2h1_l3)\n', (10570, 10583), True, 'import tensorflow as tf\n'), ((10753, 10776), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_b3h1_l3'], {}), '(out_b3h1_l3)\n', (10763, 10776), True, 'import tensorflow as tf\n'), ((11121, 11144), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_b1h2_l3'], {}), '(out_b1h2_l3)\n', (11131, 11144), True, 'import tensorflow as tf\n'), ((11314, 11337), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_b2h2_l3'], {}), '(out_b2h2_l3)\n', (11324, 11337), True, 'import tensorflow as tf\n'), ((11507, 11530), 'tensorflow.sigmoid', 'tf.sigmoid', (['out_b3h2_l3'], {}), '(out_b3h2_l3)\n', (11517, 11530), True, 'import tensorflow as tf\n'), ((12957, 12996), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (12990, 12996), True, 'import tensorflow as tf\n'), ((13038, 13062), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13060, 13062), True, 'import tensorflow as tf\n'), ((8320, 8331), 'tensorflow.shape', 'tf.shape', (['p'], {}), '(p)\n', (8328, 8331), True, 'import tensorflow as 
tf\n'), ((1999, 2035), 'tensorflow.floormod', 'tf.floormod', (['tmin', 'original_map_size'], {}), '(tmin, original_map_size)\n', (2010, 2035), True, 'import tensorflow as tf\n'), ((2049, 2085), 'tensorflow.floordiv', 'tf.floordiv', (['tmin', 'original_map_size'], {}), '(tmin, original_map_size)\n', (2060, 2085), True, 'import tensorflow as tf\n'), ((2369, 2404), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[0]'], {}), '(out_2_l3, out_h2_l3[0])\n', (2380, 2404), True, 'import tensorflow as tf\n'), ((2497, 2532), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[0]'], {}), '(out_2_l3, out_h1_l3[0])\n', (2508, 2532), True, 'import tensorflow as tf\n'), ((2662, 2697), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[1]'], {}), '(out_2_l3, out_h2_l3[1])\n', (2673, 2697), True, 'import tensorflow as tf\n'), ((2790, 2825), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[1]'], {}), '(out_2_l3, out_h1_l3[1])\n', (2801, 2825), True, 'import tensorflow as tf\n'), ((2955, 2990), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[2]'], {}), '(out_2_l3, out_h2_l3[2])\n', (2966, 2990), True, 'import tensorflow as tf\n'), ((3083, 3118), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[2]'], {}), '(out_2_l3, out_h1_l3[2])\n', (3094, 3118), True, 'import tensorflow as tf\n'), ((3248, 3283), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[3]'], {}), '(out_2_l3, out_h2_l3[3])\n', (3259, 3283), True, 'import tensorflow as tf\n'), ((3376, 3411), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[3]'], {}), '(out_2_l3, out_h1_l3[3])\n', (3387, 3411), True, 'import tensorflow as tf\n'), ((3541, 3576), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[4]'], {}), '(out_2_l3, out_h2_l3[4])\n', (3552, 3576), True, 'import tensorflow as tf\n'), ((3669, 3704), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[4]'], {}), '(out_2_l3, out_h1_l3[4])\n', (3680, 
3704), True, 'import tensorflow as tf\n'), ((3834, 3869), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[5]'], {}), '(out_2_l3, out_h2_l3[5])\n', (3845, 3869), True, 'import tensorflow as tf\n'), ((3962, 3997), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[5]'], {}), '(out_2_l3, out_h1_l3[5])\n', (3973, 3997), True, 'import tensorflow as tf\n'), ((4127, 4162), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[6]'], {}), '(out_2_l3, out_h2_l3[6])\n', (4138, 4162), True, 'import tensorflow as tf\n'), ((4255, 4290), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[6]'], {}), '(out_2_l3, out_h1_l3[6])\n', (4266, 4290), True, 'import tensorflow as tf\n'), ((4420, 4455), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[7]'], {}), '(out_2_l3, out_h2_l3[7])\n', (4431, 4455), True, 'import tensorflow as tf\n'), ((4548, 4583), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[7]'], {}), '(out_2_l3, out_h1_l3[7])\n', (4559, 4583), True, 'import tensorflow as tf\n'), ((4713, 4748), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[8]'], {}), '(out_2_l3, out_h2_l3[8])\n', (4724, 4748), True, 'import tensorflow as tf\n'), ((4841, 4876), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[8]'], {}), '(out_2_l3, out_h1_l3[8])\n', (4852, 4876), True, 'import tensorflow as tf\n'), ((5006, 5041), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[9]'], {}), '(out_2_l3, out_h2_l3[9])\n', (5017, 5041), True, 'import tensorflow as tf\n'), ((5134, 5169), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[9]'], {}), '(out_2_l3, out_h1_l3[9])\n', (5145, 5169), True, 'import tensorflow as tf\n'), ((5301, 5337), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[10]'], {}), '(out_2_l3, out_h2_l3[10])\n', (5312, 5337), True, 'import tensorflow as tf\n'), ((5431, 5467), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[10]'], {}), 
'(out_2_l3, out_h1_l3[10])\n', (5442, 5467), True, 'import tensorflow as tf\n'), ((5599, 5635), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[11]'], {}), '(out_2_l3, out_h2_l3[11])\n', (5610, 5635), True, 'import tensorflow as tf\n'), ((5729, 5765), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[11]'], {}), '(out_2_l3, out_h1_l3[11])\n', (5740, 5765), True, 'import tensorflow as tf\n'), ((5897, 5933), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[12]'], {}), '(out_2_l3, out_h2_l3[12])\n', (5908, 5933), True, 'import tensorflow as tf\n'), ((6027, 6063), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[12]'], {}), '(out_2_l3, out_h1_l3[12])\n', (6038, 6063), True, 'import tensorflow as tf\n'), ((6195, 6231), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[13]'], {}), '(out_2_l3, out_h2_l3[13])\n', (6206, 6231), True, 'import tensorflow as tf\n'), ((6325, 6361), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[13]'], {}), '(out_2_l3, out_h1_l3[13])\n', (6336, 6361), True, 'import tensorflow as tf\n'), ((6493, 6529), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[14]'], {}), '(out_2_l3, out_h2_l3[14])\n', (6504, 6529), True, 'import tensorflow as tf\n'), ((6623, 6659), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[14]'], {}), '(out_2_l3, out_h1_l3[14])\n', (6634, 6659), True, 'import tensorflow as tf\n'), ((6791, 6827), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h2_l3[15]'], {}), '(out_2_l3, out_h2_l3[15])\n', (6802, 6827), True, 'import tensorflow as tf\n'), ((6921, 6957), 'tensorflow.subtract', 'tf.subtract', (['out_2_l3', 'out_h1_l3[15]'], {}), '(out_2_l3, out_h1_l3[15])\n', (6932, 6957), True, 'import tensorflow as tf\n')] |
import numpy as np
import scipy as sp
from argparse import ArgumentParser
from sklearn.datasets import load_breast_cancer, load_iris, load_boston, load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, roc_auc_score
from gp_lib.gp import ConstantMeanGP
from gp_lib.sparse import SparseGP
from gp_lib.kernels import *
if __name__ == "__main__":
    np.random.seed(123)

    argparser = ArgumentParser()
    argparser.add_argument("--lr", type=float, default=0.1)
    argparser.add_argument("--verbose", action="store_true")
    argparser.add_argument("--tol", type=float, default=1e-3)
    args = argparser.parse_args()
    # NOTE(review): --lr and --tol are parsed but never used below; they are
    # kept so the CLI stays backward-compatible.  TODO: wire them into tuning
    # or drop them.

    # standardize all features so that squared exponential kernel length scales are closer in
    # magnitude to each other, and dot product kernels are scale-independent
    x, y = load_boston(return_X_y=True)
    idxs = np.arange(len(x))
    np.random.shuffle(idxs)
    x = x[idxs]
    y = y[idxs]
    x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
    # Append a constant bias column (hence the 14-dim kernel below).
    x = np.c_[x, np.ones(len(x))]
    x_tr, x_te, y_tr, y_te = train_test_split(x, y)

    # anisotropic squared exponential kernel
    # no constant is needed because most we expect most of the data to fit within the
    # interpolation range anyway
    noise_lvl = 1.0
    kernel = AnisotropicSEKernel(np.ones(14))
    gp = ConstantMeanGP(np.mean(y_tr), kernel, noise_lvl)

    # train using all the data
    # BUG FIX: verbose was hard-coded to False, so the --verbose flag had no effect.
    result = gp.tune(x_tr, y_tr, verbose=args.verbose)
    mean, var = gp.predict(x_te)
    print(f"Loglik: {-result.fun:.2f}")
    print(f"R2: {r2_score(y_te, mean):.2f}")
    print(f"RMSE: {((mean - y_te) ** 2).mean() ** 0.5:.2f}")

    # train using 1/10-th of the data as variational inputs, using the previous kernel
    print("===")
    gp = SparseGP(np.mean(y_tr), kernel, noise_lvl)
    lower_bound = gp.fit(x_tr, y_tr, x_tr[:20], eval_gradient=False)
    mean, var = gp.predict(x_te)
    print(f"Lower bound: {lower_bound:.2f}")
    print(f"R2: {r2_score(y_te, mean):.2f}")
    print(f"RMSE: {((mean - y_te) ** 2).mean() ** 0.5:.2f}")

    # train using 1/10-th of the data as variational inputs, learn kernel from scratch
    print("===")
    kernel = AnisotropicSEKernel(np.ones(14))
    gp = SparseGP(np.mean(y_tr), kernel, noise_lvl)
    # BUG FIX: same hard-coded verbose=False as above.
    result = gp.tune(x_tr, y_tr, x_tr[:20], verbose=args.verbose)
    mean, var = gp.predict(x_te)
    print(f"Lower bound: {-result.fun:.2f}")
    print(f"R2: {r2_score(y_te, mean):.2f}")
    print(f"RMSE: {((mean - y_te) ** 2).mean() ** 0.5:.2f}")
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.std",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.r2_score",
"numpy.ones",
"sklearn.datasets.load_boston",
"numpy.mean",
"numpy.random.shuffle"
] | [((397, 416), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (411, 416), True, 'import numpy as np\n'), ((434, 450), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (448, 450), False, 'from argparse import ArgumentParser\n'), ((851, 868), 'sklearn.datasets.load_boston', 'load_boston', (['(True)'], {}), '(True)\n', (862, 868), False, 'from sklearn.datasets import load_breast_cancer, load_iris, load_boston, load_wine\n'), ((902, 925), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (919, 925), True, 'import numpy as np\n'), ((1093, 1115), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {}), '(x, y)\n', (1109, 1115), False, 'from sklearn.model_selection import train_test_split\n'), ((1012, 1029), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1018, 1029), True, 'import numpy as np\n'), ((1334, 1345), 'numpy.ones', 'np.ones', (['(14)'], {}), '(14)\n', (1341, 1345), True, 'import numpy as np\n'), ((1371, 1384), 'numpy.mean', 'np.mean', (['y_tr'], {}), '(y_tr)\n', (1378, 1384), True, 'import numpy as np\n'), ((1787, 1800), 'numpy.mean', 'np.mean', (['y_tr'], {}), '(y_tr)\n', (1794, 1800), True, 'import numpy as np\n'), ((2212, 2223), 'numpy.ones', 'np.ones', (['(14)'], {}), '(14)\n', (2219, 2223), True, 'import numpy as np\n'), ((2243, 2256), 'numpy.mean', 'np.mean', (['y_tr'], {}), '(y_tr)\n', (2250, 2256), True, 'import numpy as np\n'), ((990, 1008), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (997, 1008), True, 'import numpy as np\n'), ((1575, 1595), 'sklearn.metrics.r2_score', 'r2_score', (['y_te', 'mean'], {}), '(y_te, mean)\n', (1583, 1595), False, 'from sklearn.metrics import r2_score, roc_auc_score\n'), ((1985, 2005), 'sklearn.metrics.r2_score', 'r2_score', (['y_te', 'mean'], {}), '(y_te, mean)\n', (1993, 2005), False, 'from sklearn.metrics import r2_score, roc_auc_score\n'), ((2431, 2451), 'sklearn.metrics.r2_score', 
'r2_score', (['y_te', 'mean'], {}), '(y_te, mean)\n', (2439, 2451), False, 'from sklearn.metrics import r2_score, roc_auc_score\n')] |
import numpy as np
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
def measure(ploty, leftx, rightx):
'''
Calculates the curvature of polynomial functions in meters.
'''
left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
##### Implement the calculation of R_curve (radius of curvature) #####
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
return left_curverad, right_curverad
def distance(image, left_fitx, right_fitx):
    """Return the camera's lateral offset from the lane center, in meters.

    Uses the bottom-most fitted x position of each lane line; positive values
    mean the image center lies to the right of the lane center.
    """
    frame_width = image.shape[1]
    # Bottom-of-image x positions of the two fitted lane lines.
    lane_left = left_fitx[-1]
    lane_right = right_fitx[-1]
    lane_center = lane_left + (lane_right - lane_left) / 2
    # Pixel offset of the image center from the lane center, scaled to meters.
    return (frame_width / 2 - lane_center) * xm_per_pix
"numpy.absolute",
"numpy.max",
"numpy.polyfit"
] | [((313, 366), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(leftx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n', (323, 366), True, 'import numpy as np\n'), ((379, 433), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(rightx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n', (389, 433), True, 'import numpy as np\n'), ((571, 584), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (577, 584), True, 'import numpy as np\n'), ((747, 778), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (758, 778), True, 'import numpy as np\n'), ((869, 901), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (880, 901), True, 'import numpy as np\n')] |
"""
Credits:
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import unittest
import os
import numpy as np
from eolearn.core import EOPatch, FeatureType
from eolearn.geometry import VectorToRaster, RasterToVector
from shapely.geometry import Polygon
class TestVectorToRaster(unittest.TestCase):
""" Testing transformation vector -> raster
"""
    class TestCase:
        """
        Container for each test case

        Bundles a rasterization task with the statistics (min, max, mean,
        median, dtype and shape) expected of the raster it produces.
        """
        # Path of the reference EOPatch shared by all cases, resolved relative
        # to this test module.
        TEST_PATCH_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../example_data',
                                          'TestEOPatch')

        def __init__(self, name, task, img_min=0, img_max=0, img_mean=0, img_median=0, img_dtype=None, img_shape=None):
            """Store the task to run and the statistics its output should match.

            Args:
                name: human-readable label of the scenario.
                task: the EOTask to execute against the reference patch.
                img_min, img_max, img_mean, img_median: expected raster statistics.
                img_dtype: expected numpy dtype of the rasterized feature.
                img_shape: expected shape of the rasterized feature.
            """
            self.name = name
            self.task = task
            self.img_min = img_min
            self.img_max = img_max
            self.img_mean = img_mean
            self.img_median = img_median
            self.img_dtype = img_dtype
            self.img_shape = img_shape
            # Filled in by execute(): the EOPatch after the task has run.
            self.result = None

        def execute(self):
            """Load the reference patch and run the task on it twice.

            NOTE(review): the task is applied a second time to its own output,
            presumably to check that re-running it is safe/idempotent -- confirm.
            """
            eopatch = EOPatch.load(self.TEST_PATCH_FILENAME)
            self.result = self.task.execute(eopatch)
            self.result = self.task.execute(self.result)
@classmethod
def setUpClass(cls):
cls.vector_feature = FeatureType.VECTOR_TIMELESS, 'LULC'
cls.raster_feature = FeatureType.MASK_TIMELESS, 'RASTERIZED_LULC'
custom_dataframe = EOPatch.load(cls.TestCase.TEST_PATCH_FILENAME).vector_timeless['LULC']
custom_dataframe = custom_dataframe[(custom_dataframe['AREA'] < 10 ** 3)]
cls.test_cases = [
cls.TestCase('basic test',
VectorToRaster(cls.vector_feature, cls.raster_feature, values_column='LULC_ID',
raster_shape=(FeatureType.DATA, 'BANDS-S2-L1C'), no_data_value=20),
img_min=0, img_max=8, img_mean=2.33267, img_median=2, img_dtype=np.uint8,
img_shape=(101, 100, 1)),
cls.TestCase('single value filter, fixed shape',
VectorToRaster(cls.vector_feature, cls.raster_feature, values=8, values_column='LULC_ID',
raster_shape=(50, 50), no_data_value=20, write_to_existing=True,
raster_dtype=np.int32),
img_min=8, img_max=20, img_mean=19.76, img_median=20, img_dtype=np.int32,
img_shape=(50, 50, 1)),
cls.TestCase('multiple values filter, resolution, all touched',
VectorToRaster(cls.vector_feature, cls.raster_feature, values=[1, 5], values_column='LULC_ID',
raster_resolution='60m', no_data_value=13, raster_dtype=np.uint16,
all_touched=True, write_to_existing=False),
img_min=1, img_max=13, img_mean=12.7093, img_median=13, img_dtype=np.uint16,
img_shape=(17, 17, 1)),
cls.TestCase('deprecated parameters, single value, custom resolution',
VectorToRaster(vector_input=custom_dataframe, raster_feature=cls.raster_feature, values=14,
raster_resolution=(32, 15), no_data_value=-1, raster_dtype=np.int32),
img_min=-1, img_max=14, img_mean=-0.8411, img_median=-1, img_dtype=np.int32,
img_shape=(67, 31, 1)),
cls.TestCase('empty vector data test',
VectorToRaster(vector_input=custom_dataframe[
(custom_dataframe.LULC_NAME == 'some_none_existent_name')],
raster_feature=cls.raster_feature,
values_column='LULC_ID',
raster_shape=(FeatureType.DATA, 'BANDS-S2-L1C'), no_data_value=0),
img_min=0, img_max=0, img_mean=0, img_median=0, img_dtype=np.uint8,
img_shape=(101, 100, 1)),
cls.TestCase('negative polygon buffering',
VectorToRaster(vector_input=custom_dataframe,
raster_feature=cls.raster_feature,
values_column='LULC_ID',
buffer=-2,
raster_shape=(FeatureType.DATA, 'BANDS-S2-L1C'), no_data_value=0),
img_min=0, img_max=8, img_mean=0.0229, img_median=0, img_dtype=np.uint8,
img_shape=(101, 100, 1)),
cls.TestCase('positive polygon buffering',
VectorToRaster(vector_input=custom_dataframe,
raster_feature=cls.raster_feature,
values_column='LULC_ID',
buffer=2,
raster_shape=(FeatureType.DATA, 'BANDS-S2-L1C'), no_data_value=0),
img_min=0, img_max=8, img_mean=0.0664, img_median=0, img_dtype=np.uint8,
img_shape=(101, 100, 1)),
]
for test_case in cls.test_cases:
test_case.execute()
def test_result(self):
for test_case in self.test_cases:
delta = 1e-3
data = test_case.result[self.raster_feature[0]][self.raster_feature[1]]
min_val = np.amin(data)
with self.subTest(msg='Test case {}'.format(test_case.name)):
self.assertAlmostEqual(test_case.img_min, min_val, delta=delta,
msg="Expected min {}, got {}".format(test_case.img_min, min_val))
max_val = np.amax(data)
with self.subTest(msg='Test case {}'.format(test_case.name)):
self.assertAlmostEqual(test_case.img_max, max_val, delta=delta,
msg="Expected max {}, got {}".format(test_case.img_max, max_val))
mean_val = np.mean(data)
with self.subTest(msg='Test case {}'.format(test_case.name)):
self.assertAlmostEqual(test_case.img_mean, mean_val, delta=delta,
msg="Expected mean {}, got {}".format(test_case.img_mean, mean_val))
median_val = np.median(data)
with self.subTest(msg='Test case {}'.format(test_case.name)):
self.assertAlmostEqual(test_case.img_median, median_val, delta=delta,
msg="Expected median {}, got {}".format(test_case.img_median, median_val))
with self.subTest(msg='Test case {}'.format(test_case.name)):
self.assertTrue(test_case.img_dtype == data.dtype,
msg="Expected dtype {}, got {}".format(test_case.img_dtype, data.dtype))
with self.subTest(msg='Test case {}'.format(test_case.name)):
self.assertEqual(test_case.img_shape, data.shape,
msg="Expected shape {}, got {}".format(test_case.img_shape, data.shape))
def test_polygon_overlap(self):
patch_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../example_data', 'TestEOPatch')
patch = EOPatch.load(patch_path)
# create two test bboxes to overlap existing classes
bounds = patch.vector_timeless['LULC'].total_bounds
test_bounds1 = bounds[0] + 500, bounds[1] + 1000, bounds[2] - 1450, bounds[3] - 1650
test_bounds2 = bounds[0] + 300, bounds[1] + 1400, bounds[2] - 1750, bounds[3] - 1300
dframe = patch.vector_timeless['LULC'][0:50]
# override 0th row with a test polygon of class 10
test_row = dframe.index[0]
dframe.at[test_row, 'LULC_ID'] = 10
dframe.at[test_row, 'geometry'] = Polygon.from_bounds(*test_bounds1)
# override the last row with a test polygon of class 5
test_row = dframe.index[-1]
dframe.at[test_row, 'LULC_ID'] = 5
dframe.at[test_row, 'geometry'] = Polygon.from_bounds(*test_bounds2)
patch.vector_timeless['TEST'] = dframe
shape_feature = FeatureType.DATA, 'BANDS-S2-L1C'
# no overlap
patch = VectorToRaster(dframe[1:-1], (FeatureType.MASK_TIMELESS, 'OVERLAP_0'),
values_column='LULC_ID', raster_shape=shape_feature, overlap_value=5)(patch)
# overlap without taking intersection into account
patch = VectorToRaster(dframe, (FeatureType.MASK_TIMELESS, 'OVERLAP_1'),
values_column='LULC_ID', raster_shape=shape_feature, overlap_value=None)(patch)
# overlap without setting intersections to 0
patch = VectorToRaster(dframe, (FeatureType.MASK_TIMELESS, 'OVERLAP_2'),
values_column='LULC_ID', raster_shape=shape_feature, overlap_value=0)(patch)
# overlap without setting intersections to class 7
patch = VectorToRaster(dframe, (FeatureType.MASK_TIMELESS, 'OVERLAP_3'),
values_column='LULC_ID', raster_shape=shape_feature, overlap_value=7)(patch)
# separately render bboxes for comparisons in asserts
patch = VectorToRaster(dframe[:1], (FeatureType.MASK_TIMELESS, 'TEST_BBOX1'),
values_column='LULC_ID', raster_shape=shape_feature)(patch)
patch = VectorToRaster(dframe[-1:], (FeatureType.MASK_TIMELESS, 'TEST_BBOX2'),
values_column='LULC_ID', raster_shape=shape_feature)(patch)
bbox1 = patch.mask_timeless['TEST_BBOX1']
bbox2 = patch.mask_timeless['TEST_BBOX2']
overlap0 = patch.mask_timeless['OVERLAP_0']
overlap1 = patch.mask_timeless['OVERLAP_1']
overlap2 = patch.mask_timeless['OVERLAP_2']
# 4 gets partially covered by 5
self.assertTrue(np.count_nonzero(overlap0 == 4) > np.count_nonzero(overlap1 == 4))
# 2 doesn't get covered, stays the same
self.assertTrue(np.count_nonzero(overlap0 == 2) == np.count_nonzero(overlap1 == 2))
# 10 is bbox2 and it gets covered by other classes
self.assertTrue(np.count_nonzero(bbox1) > np.count_nonzero(overlap1 == 10))
# 5 is bbox1 and it is rendered on top of all others, so it doesn't get covered
self.assertTrue(np.count_nonzero(bbox2) == np.count_nonzero(overlap1 == 5))
# all classes have their parts intersected, so the sum should reduce
self.assertTrue(np.count_nonzero(bbox1) > np.count_nonzero(overlap2 == 10))
self.assertTrue(np.count_nonzero(bbox2) > np.count_nonzero(overlap2 == 5))
self.assertTrue(np.count_nonzero(overlap0 == 4) > np.count_nonzero(overlap2 == 4))
# 2 gets covered completely
self.assertTrue(np.count_nonzero(overlap2 == 2) == 0)
class TestRasterToVector(unittest.TestCase):
    """ Testing transformation raster -> vector
    """
    class TestCase:
        """
        Container for each test case
        """
        TEST_PATCH_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../example_data',
                                            'TestEOPatch')
        def __init__(self, name, task, feature, data_len, test_reverse=False):
            # data_len: expected number of vectorized shapes.
            # test_reverse: whether rasterizing the result back should reproduce the input.
            self.name = name
            self.task = task
            self.feature = feature
            self.data_len = data_len
            self.test_reverse = test_reverse
            self.result = None
        @property
        def vector_feature(self):
            # A timeless raster input yields a timeless vector output and vice versa.
            feature_type = FeatureType.VECTOR_TIMELESS if self.feature[0].is_timeless() else FeatureType.VECTOR
            return feature_type, self.feature[-1]
        def execute(self):
            eopatch = EOPatch.load(self.TEST_PATCH_FILENAME)
            self.result = self.task.execute(eopatch)
    @classmethod
    def setUpClass(cls):
        """Build and execute every RasterToVector test case once for the whole class."""
        feature1 = FeatureType.MASK_TIMELESS, 'LULC', 'NEW_LULC'
        feature2 = FeatureType.MASK, 'CLM'
        cls.test_cases = [
            cls.TestCase('reverse test',
                         RasterToVector(feature1), feature=feature1, data_len=126, test_reverse=True),
            cls.TestCase('parameters test',
                         RasterToVector(feature2, values=[1, 2], values_column='IS_CLOUD', raster_dtype=np.int16,
                                        connectivity=8),
                         feature=feature2, data_len=54),
        ]
        for test_case in cls.test_cases:
            test_case.execute()
    def test_result(self):
        """The number of produced shapes must match the expected count."""
        for test_case in self.test_cases:
            ft, fn = test_case.vector_feature
            data = test_case.result[ft][fn]
            data_len = len(data.index)
            with self.subTest(msg='Test case {}'.format(test_case.name)):
                self.assertEqual(test_case.data_len, data_len,
                                 msg="Expected number of shapes {}, got {}".format(test_case.data_len, data_len))
    def test_transformation_back(self):
        """Vectorize-then-rasterize round trip must reproduce the original raster."""
        for test_case in self.test_cases:
            if test_case.test_reverse:
                with self.subTest(msg='Test case {}'.format(test_case.name)):
                    new_raster_feature = test_case.feature[0], '{}_NEW'.format(test_case.feature[1])
                    old_raster_feature = test_case.feature[:2]
                    vector2raster_task = VectorToRaster(test_case.vector_feature, new_raster_feature,
                                                        values_column=test_case.task.values_column,
                                                        raster_shape=old_raster_feature)
                    eop = vector2raster_task(test_case.result)
                    new_raster = eop[new_raster_feature[0]][new_raster_feature[1]]
                    old_raster = eop[old_raster_feature[0]][old_raster_feature[1]]
                    self.assertTrue(np.array_equal(new_raster, old_raster),
                                    msg='Old and new raster features should be the same')
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.count_nonzero",
"numpy.amin",
"numpy.median",
"shapely.geometry.Polygon.from_bounds",
"eolearn.core.EOPatch.load",
"os.path.realpath",
"numpy.amax",
"numpy.mean",
"numpy.array_equal",
"eolearn.geometry.VectorToRaster",
"eolearn.geometry.RasterToVector"
] | [((14518, 14533), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14531, 14533), False, 'import unittest\n'), ((7715, 7739), 'eolearn.core.EOPatch.load', 'EOPatch.load', (['patch_path'], {}), '(patch_path)\n', (7727, 7739), False, 'from eolearn.core import EOPatch, FeatureType\n'), ((8283, 8317), 'shapely.geometry.Polygon.from_bounds', 'Polygon.from_bounds', (['*test_bounds1'], {}), '(*test_bounds1)\n', (8302, 8317), False, 'from shapely.geometry import Polygon\n'), ((8503, 8537), 'shapely.geometry.Polygon.from_bounds', 'Polygon.from_bounds', (['*test_bounds2'], {}), '(*test_bounds2)\n', (8522, 8537), False, 'from shapely.geometry import Polygon\n'), ((1380, 1418), 'eolearn.core.EOPatch.load', 'EOPatch.load', (['self.TEST_PATCH_FILENAME'], {}), '(self.TEST_PATCH_FILENAME)\n', (1392, 1418), False, 'from eolearn.core import EOPatch, FeatureType\n'), ((5862, 5875), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (5869, 5875), True, 'import numpy as np\n'), ((6158, 6171), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (6165, 6171), True, 'import numpy as np\n'), ((6455, 6468), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (6462, 6468), True, 'import numpy as np\n'), ((6759, 6774), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (6768, 6774), True, 'import numpy as np\n'), ((8682, 8826), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['dframe[1:-1]', "(FeatureType.MASK_TIMELESS, 'OVERLAP_0')"], {'values_column': '"""LULC_ID"""', 'raster_shape': 'shape_feature', 'overlap_value': '(5)'}), "(dframe[1:-1], (FeatureType.MASK_TIMELESS, 'OVERLAP_0'),\n values_column='LULC_ID', raster_shape=shape_feature, overlap_value=5)\n", (8696, 8826), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((8937, 9078), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['dframe', "(FeatureType.MASK_TIMELESS, 'OVERLAP_1')"], {'values_column': '"""LULC_ID"""', 'raster_shape': 'shape_feature', 'overlap_value': 'None'}), 
"(dframe, (FeatureType.MASK_TIMELESS, 'OVERLAP_1'),\n values_column='LULC_ID', raster_shape=shape_feature, overlap_value=None)\n", (8951, 9078), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((9183, 9321), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['dframe', "(FeatureType.MASK_TIMELESS, 'OVERLAP_2')"], {'values_column': '"""LULC_ID"""', 'raster_shape': 'shape_feature', 'overlap_value': '(0)'}), "(dframe, (FeatureType.MASK_TIMELESS, 'OVERLAP_2'),\n values_column='LULC_ID', raster_shape=shape_feature, overlap_value=0)\n", (9197, 9321), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((9432, 9570), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['dframe', "(FeatureType.MASK_TIMELESS, 'OVERLAP_3')"], {'values_column': '"""LULC_ID"""', 'raster_shape': 'shape_feature', 'overlap_value': '(7)'}), "(dframe, (FeatureType.MASK_TIMELESS, 'OVERLAP_3'),\n values_column='LULC_ID', raster_shape=shape_feature, overlap_value=7)\n", (9446, 9570), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((9684, 9810), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['dframe[:1]', "(FeatureType.MASK_TIMELESS, 'TEST_BBOX1')"], {'values_column': '"""LULC_ID"""', 'raster_shape': 'shape_feature'}), "(dframe[:1], (FeatureType.MASK_TIMELESS, 'TEST_BBOX1'),\n values_column='LULC_ID', raster_shape=shape_feature)\n", (9698, 9810), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((9861, 9988), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['dframe[-1:]', "(FeatureType.MASK_TIMELESS, 'TEST_BBOX2')"], {'values_column': '"""LULC_ID"""', 'raster_shape': 'shape_feature'}), "(dframe[-1:], (FeatureType.MASK_TIMELESS, 'TEST_BBOX2'),\n values_column='LULC_ID', raster_shape=shape_feature)\n", (9875, 9988), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((12209, 12247), 'eolearn.core.EOPatch.load', 'EOPatch.load', (['self.TEST_PATCH_FILENAME'], {}), 
'(self.TEST_PATCH_FILENAME)\n', (12221, 12247), False, 'from eolearn.core import EOPatch, FeatureType\n'), ((781, 807), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (797, 807), False, 'import os\n'), ((1739, 1785), 'eolearn.core.EOPatch.load', 'EOPatch.load', (['cls.TestCase.TEST_PATCH_FILENAME'], {}), '(cls.TestCase.TEST_PATCH_FILENAME)\n', (1751, 1785), False, 'from eolearn.core import EOPatch, FeatureType\n'), ((1984, 2139), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['cls.vector_feature', 'cls.raster_feature'], {'values_column': '"""LULC_ID"""', 'raster_shape': "(FeatureType.DATA, 'BANDS-S2-L1C')", 'no_data_value': '(20)'}), "(cls.vector_feature, cls.raster_feature, values_column=\n 'LULC_ID', raster_shape=(FeatureType.DATA, 'BANDS-S2-L1C'),\n no_data_value=20)\n", (1998, 2139), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((2408, 2593), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['cls.vector_feature', 'cls.raster_feature'], {'values': '(8)', 'values_column': '"""LULC_ID"""', 'raster_shape': '(50, 50)', 'no_data_value': '(20)', 'write_to_existing': '(True)', 'raster_dtype': 'np.int32'}), "(cls.vector_feature, cls.raster_feature, values=8,\n values_column='LULC_ID', raster_shape=(50, 50), no_data_value=20,\n write_to_existing=True, raster_dtype=np.int32)\n", (2422, 2593), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((2916, 3128), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['cls.vector_feature', 'cls.raster_feature'], {'values': '[1, 5]', 'values_column': '"""LULC_ID"""', 'raster_resolution': '"""60m"""', 'no_data_value': '(13)', 'raster_dtype': 'np.uint16', 'all_touched': '(True)', 'write_to_existing': '(False)'}), "(cls.vector_feature, cls.raster_feature, values=[1, 5],\n values_column='LULC_ID', raster_resolution='60m', no_data_value=13,\n raster_dtype=np.uint16, all_touched=True, write_to_existing=False)\n", (2930, 3128), False, 'from 
eolearn.geometry import VectorToRaster, RasterToVector\n'), ((3461, 3630), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', ([], {'vector_input': 'custom_dataframe', 'raster_feature': 'cls.raster_feature', 'values': '(14)', 'raster_resolution': '(32, 15)', 'no_data_value': '(-1)', 'raster_dtype': 'np.int32'}), '(vector_input=custom_dataframe, raster_feature=cls.\n raster_feature, values=14, raster_resolution=(32, 15), no_data_value=-1,\n raster_dtype=np.int32)\n', (3475, 3630), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((3890, 4131), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', ([], {'vector_input': "custom_dataframe[custom_dataframe.LULC_NAME == 'some_none_existent_name']", 'raster_feature': 'cls.raster_feature', 'values_column': '"""LULC_ID"""', 'raster_shape': "(FeatureType.DATA, 'BANDS-S2-L1C')", 'no_data_value': '(0)'}), "(vector_input=custom_dataframe[custom_dataframe.LULC_NAME ==\n 'some_none_existent_name'], raster_feature=cls.raster_feature,\n values_column='LULC_ID', raster_shape=(FeatureType.DATA, 'BANDS-S2-L1C'\n ), no_data_value=0)\n", (3904, 4131), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((4511, 4703), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', ([], {'vector_input': 'custom_dataframe', 'raster_feature': 'cls.raster_feature', 'values_column': '"""LULC_ID"""', 'buffer': '(-2)', 'raster_shape': "(FeatureType.DATA, 'BANDS-S2-L1C')", 'no_data_value': '(0)'}), "(vector_input=custom_dataframe, raster_feature=cls.\n raster_feature, values_column='LULC_ID', buffer=-2, raster_shape=(\n FeatureType.DATA, 'BANDS-S2-L1C'), no_data_value=0)\n", (4525, 4703), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((5084, 5275), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', ([], {'vector_input': 'custom_dataframe', 'raster_feature': 'cls.raster_feature', 'values_column': '"""LULC_ID"""', 'buffer': '(2)', 'raster_shape': "(FeatureType.DATA, 'BANDS-S2-L1C')", 
'no_data_value': '(0)'}), "(vector_input=custom_dataframe, raster_feature=cls.\n raster_feature, values_column='LULC_ID', buffer=2, raster_shape=(\n FeatureType.DATA, 'BANDS-S2-L1C'), no_data_value=0)\n", (5098, 5275), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((7630, 7656), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (7646, 7656), False, 'import os\n'), ((10346, 10377), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap0 == 4)'], {}), '(overlap0 == 4)\n', (10362, 10377), True, 'import numpy as np\n'), ((10380, 10411), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap1 == 4)'], {}), '(overlap1 == 4)\n', (10396, 10411), True, 'import numpy as np\n'), ((10485, 10516), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap0 == 2)'], {}), '(overlap0 == 2)\n', (10501, 10516), True, 'import numpy as np\n'), ((10520, 10551), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap1 == 2)'], {}), '(overlap1 == 2)\n', (10536, 10551), True, 'import numpy as np\n'), ((10636, 10659), 'numpy.count_nonzero', 'np.count_nonzero', (['bbox1'], {}), '(bbox1)\n', (10652, 10659), True, 'import numpy as np\n'), ((10662, 10694), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap1 == 10)'], {}), '(overlap1 == 10)\n', (10678, 10694), True, 'import numpy as np\n'), ((10808, 10831), 'numpy.count_nonzero', 'np.count_nonzero', (['bbox2'], {}), '(bbox2)\n', (10824, 10831), True, 'import numpy as np\n'), ((10835, 10866), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap1 == 5)'], {}), '(overlap1 == 5)\n', (10851, 10866), True, 'import numpy as np\n'), ((10970, 10993), 'numpy.count_nonzero', 'np.count_nonzero', (['bbox1'], {}), '(bbox1)\n', (10986, 10993), True, 'import numpy as np\n'), ((10996, 11028), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap2 == 10)'], {}), '(overlap2 == 10)\n', (11012, 11028), True, 'import numpy as np\n'), ((11054, 11077), 'numpy.count_nonzero', 'np.count_nonzero', (['bbox2'], {}), 
'(bbox2)\n', (11070, 11077), True, 'import numpy as np\n'), ((11080, 11111), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap2 == 5)'], {}), '(overlap2 == 5)\n', (11096, 11111), True, 'import numpy as np\n'), ((11137, 11168), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap0 == 4)'], {}), '(overlap0 == 4)\n', (11153, 11168), True, 'import numpy as np\n'), ((11171, 11202), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap2 == 4)'], {}), '(overlap2 == 4)\n', (11187, 11202), True, 'import numpy as np\n'), ((11264, 11295), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap2 == 2)'], {}), '(overlap2 == 2)\n', (11280, 11295), True, 'import numpy as np\n'), ((11545, 11571), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (11561, 11571), False, 'import os\n'), ((12546, 12570), 'eolearn.geometry.RasterToVector', 'RasterToVector', (['feature1'], {}), '(feature1)\n', (12560, 12570), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((12693, 12801), 'eolearn.geometry.RasterToVector', 'RasterToVector', (['feature2'], {'values': '[1, 2]', 'values_column': '"""IS_CLOUD"""', 'raster_dtype': 'np.int16', 'connectivity': '(8)'}), "(feature2, values=[1, 2], values_column='IS_CLOUD',\n raster_dtype=np.int16, connectivity=8)\n", (12707, 12801), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((13838, 13980), 'eolearn.geometry.VectorToRaster', 'VectorToRaster', (['test_case.vector_feature', 'new_raster_feature'], {'values_column': 'test_case.task.values_column', 'raster_shape': 'old_raster_feature'}), '(test_case.vector_feature, new_raster_feature, values_column=\n test_case.task.values_column, raster_shape=old_raster_feature)\n', (13852, 13980), False, 'from eolearn.geometry import VectorToRaster, RasterToVector\n'), ((14355, 14393), 'numpy.array_equal', 'np.array_equal', (['new_raster', 'old_raster'], {}), '(new_raster, old_raster)\n', (14369, 14393), True, 'import numpy as np\n')] |
#! /usr/bin/env python2
import ioutil
import cv2
import dlib
import base64
import numpy as np
import json
from camShift import camshiftTracker, meanshiftTracker
from demo_config import Config
LOG = ioutil.getLogger(__name__)
def clamp(n, minn, maxn):
    """Restrict *n* to the closed interval [minn, maxn]."""
    bounded_above = min(n, maxn)
    return max(bounded_above, minn)
# Tracking
class TrackerInitializer(object):
    """Bundle of the data needed to (re)start a tracker.

    Carries the previous frame, the ROI located in it, and the current frame.
    """

    def __init__(self, prev_frame, prev_roi, frame):
        self.frame = frame
        self.prev_roi = prev_roi
        self.prev_frame = prev_frame
def create_dlib_tracker(frame, roi):
    """Start a dlib correlation tracker on *frame* over the (x1, y1, x2, y2) *roi*."""
    x1, y1, x2, y2 = roi
    tracker = dlib.correlation_tracker()
    tracker.start_track(frame, dlib.rectangle(x1, y1, x2, y2))
    return tracker
@ioutil.timeit
def create_tracker(frame, roi, use_dlib=False):
    """Create and start a single tracker on *roi*.

    Uses dlib's correlation tracker when *use_dlib* is True, otherwise the
    project's mean-shift tracker.  *roi* may be a tuple or a dlib.rectangle.
    """
    bx = roi if isinstance(roi, dlib.rectangle) else tuple_to_drectangle(roi)
    tracker = dlib.correlation_tracker() if use_dlib else meanshiftTracker()
    tracker.start_track(frame, bx)
    LOG.debug('create tracker received: {}'.format(bx))
    return tracker
def create_trackers(frame, rois, use_dlib=False):
    """Create one started tracker per ROI in *rois*."""
    return [create_tracker(frame, roi, use_dlib=use_dlib) for roi in rois]
# dlib wrappers
def drectangle_to_tuple(drectangle):
    """Convert a dlib (d)rectangle into an int (x1, y1, x2, y2) tuple.

    Non-dlib inputs are returned unchanged.
    """
    if not isinstance(drectangle, (dlib.rectangle, dlib.drectangle)):
        return drectangle
    return (int(drectangle.left()), int(drectangle.top()),
            int(drectangle.right()), int(drectangle.bottom()))
def tuple_to_drectangle(bx):
    """Convert an (x1, y1, x2, y2) tuple into a dlib.rectangle.

    Anything that is not a tuple is returned unchanged.
    """
    if not isinstance(bx, tuple):
        return bx
    x1, y1, x2, y2 = bx
    return dlib.rectangle(x1, y1, x2, y2)
# distance
def euclidean_distance_square(roi1, roi2):
    """Squared Euclidean distance between the top-left corners of two ROIs."""
    dx = roi1[0] - roi2[0]
    dy = roi1[1] - roi2[1]
    return dx * dx + dy * dy
# np helpers
def np_array_to_jpeg_string(frame):
    """JPEG-encode an image array and return the result as a base64 string.

    Removed the commented-out PIL/StringIO implementation that cluttered the
    body; cv2.imencode produces the JPEG bytes directly.
    """
    _, jpeg_img = cv2.imencode('.jpg', frame)
    return base64.b64encode(jpeg_img)
def np_array_to_string(frame):
    """Base64-encode the raw bytes of a numpy array."""
    return base64.b64encode(frame.tobytes())
def np_array_to_jpeg_data_url(frame):
    """Return the frame as a base64 JPEG data URL suitable for embedding."""
    encoded = np_array_to_jpeg_string(frame)
    return "data:image/jpeg;base64," + encoded
# cv2 helpers
def imwrite_rgb(path, frame):
    """Convert an RGB frame to BGR (OpenCV channel order) and write it to *path*.

    BUG FIX: the original referenced `sys` without the module ever being
    imported, so every call raised NameError.  Imported locally to keep the
    fix self-contained.
    """
    import sys
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    sys.stdout.write('writing img to {}\n'.format(path))
    sys.stdout.flush()
    cv2.imwrite(path, frame)
def draw_rois(img, rois, hint=None):
    """Draw each ROI on *img* as a blue rectangle, optionally labelled with *hint*."""
    color = (255, 0, 0)
    for roi in rois:
        x1, y1, x2, y2 = tuple(roi)
        if hint:
            cv2.putText(img, hint, (x1, y1),
                        cv2.FONT_HERSHEY_PLAIN, 1, color)
        cv2.rectangle(img, (x1, y1), (x2, y2), color)
# face detection
# Minimum face bounding-box size (pixels); smaller detections are discarded.
MIN_WIDTH_THRESHOLD = 3
MIN_HEIGHT_THRESHOLD = 3
def is_small_face(roi):
    """Return True when *roi* is invalid (negative coordinate) or below the
    minimum width/height thresholds."""
    (x1, y1, x2, y2) = roi
    # any negative coordinate marks the box as invalid
    if min(x1, y1, x2, y2) < 0:
        return True
    wide_enough = abs(x2 - x1) >= MIN_WIDTH_THRESHOLD
    tall_enough = abs(y2 - y1) >= MIN_HEIGHT_THRESHOLD
    if not (wide_enough and tall_enough):
        LOG.debug('face too small discard')
        return True
    return False
def filter_small_faces(dets):
    """Drop detections that is_small_face() flags as invalid or too small.

    Rewrote the manual append loop (which also carried an unused enumerate
    index) as a list comprehension.
    """
    return [d for d in dets if not is_small_face(d)]
@ioutil.timeit
def detect_faces(frame, detector, largest_only=False, upsample_num_times=0, adjust_threshold=0.0):
    """Run the dlib *detector* on *frame* and return sorted (x1, y1, x2, y2) boxes.

    Upsampling improves recall for small faces but is expensive
    (http://dlib.net/face_detector.py.html).  With *largest_only*, only the
    biggest detection (by area) is kept.
    """
    dets, scores, sub_detector_indices = detector.run(
        frame, upsample_num_times, adjust_threshold)
    if largest_only and len(dets) > 0:
        biggest = max(dets, key=lambda rect: rect.width() * rect.height())
        dets = [biggest]
    boxes = map(lambda d: (int(d.left()), int(d.top()),
                           int(d.right()), int(d.bottom())), dets)
    rois = filter_small_faces(boxes)
    LOG.debug('# face detected : {}'.format(len(rois)))
    return sorted(rois)
def is_gray_scale(img):
    """Return True when *img* is 2-D (no channel axis, i.e. single channel)."""
    return len(img.shape) == 2
# merge old facerois with new face rois
def merge_faceROIs(old_faceROIs, new_faceROIs):
    """Merge old face ROIs with newly detected ones.

    NOTE(review): unimplemented stub -- currently a no-op returning None.
    """
    pass
def get_image_region(img, drect):
    """Crop *img* (H, W, C) to *drect*, clamping the box inside the image.

    Both corner pixels are included in the returned slice.
    """
    x1, y1, x2, y2 = drectangle_to_tuple(drect)
    h, w, _ = img.shape
    x1, x2 = clamp(x1, 0, w - 1), clamp(x2, 0, w - 1)
    y1, y2 = clamp(y1, 0, h - 1), clamp(y2, 0, h - 1)
    return img[y1:y2 + 1, x1:x2 + 1]
# the lower the number is, the higher of blurness
def variance_of_laplacian(bgr_img):
    """Focus measure: variance of the image Laplacian (lower means blurrier)."""
    if len(bgr_img.shape) == 3:
        grey = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
    else:
        grey = bgr_img
    return cv2.Laplacian(grey, cv2.CV_64F).var()
# detect if an image is blurry
# a higher threshold, meaning a higher demand for image being clearer
def is_clear(bgr_img, threshold=40):
    """Return True when the Laplacian variance reaches *threshold* (not blurry).

    A higher threshold demands a sharper image.
    """
    return variance_of_laplacian(bgr_img) >= threshold
class FaceROI(object):
    """A tracked face region: bounding box, cropped face data, name and tracker."""
    PROFILE_FACE = 'profile_face'  # sentinel name for profile (unrecognized) faces
    # dlib arbitrary number
    TRACKER_CONFIDENCE_THRESHOLD = 2
    def __init__(self, roi, data=None, name=None, tracker=None, frid=-1):
        # roi is normalized to an (x1, y1, x2, y2) int tuple
        self.roi = drectangle_to_tuple(roi)
        self.data = data  # cropped face image, if any
        self.name = name  # recognized identity, or PROFILE_FACE
        self.tracker = tracker  # per-face tracker object, if tracking
        self.swap_tmp_data = None  # scratch slot; usage not visible in this file
        self.frid = frid  # face region id
        self.low_confidence = False  # set by update_tracker_failure()
    def __copy__(self):
        # Shallow copy keeps roi/name/frid but deliberately drops data and tracker.
        newone = FaceROI(self.roi,
                         data=None,
                         name=self.name,
                         tracker=None,
                         frid=self.frid)
        return newone
    # returned ROI may go out of bounds --> representing failure of tracking
    def get_json(self, send_data=False):
        """Serialize the ROI (and optionally the JPEG-encoded data) to JSON."""
        (roi_x1, roi_y1, roi_x2, roi_y2) = self.roi
        msg = {
            'roi_x1': roi_x1,
            'roi_y1': roi_y1,
            'roi_x2': roi_x2,
            'roi_y2': roi_y2,
            'name': self.name
        }
        if send_data:
            msg['data'] = np_array_to_jpeg_string(self.data)
        return json.dumps(msg)
    # return the center location of the face
    def get_location(self):
        # Python 2 integer division when coordinates are ints (see shebang).
        (roi_x1, roi_y1, roi_x2, roi_y2) = self.roi
        return ((roi_x1 + roi_x2) / 2, (roi_y1 + roi_y2) / 2)
    def __str__(self):
        return 'frid {}: {}, {}'.format(self.frid, self.roi, self.name)
    def __repr__(self):
        return 'frid {}: {}, {}'.format(self.frid, self.roi, self.name)
    def update_tracker_failure(self, conf):
        """Flag this face as low-confidence when the tracker score drops below
        the threshold (profile faces are never flagged)."""
        self.low_confidence = (
            self.name != self.PROFILE_FACE and conf < self.TRACKER_CONFIDENCE_THRESHOLD)
        return self.low_confidence
class FaceFrame(object):
    """A video frame together with the face ROIs detected/tracked in it."""
    def __init__(self, fid, frame, faceROIs):
        # frame id
        self.fid = fid
        self.frame = frame  # raw image array
        self.faceROIs = faceROIs  # list of FaceROI
    def __repr__(self):
        return '{}: {}'.format(self.fid, self.faceROIs)
    def __str__(self):
        return '{}: {}'.format(self.fid, self.faceROIs)
    def has_bx(self, bx):
        """Return True when some stored ROI overlaps *bx* with IoU > 0.5."""
        for faceROI in self.faceROIs:
            if iou_area(faceROI.roi, bx) > 0.5:
                return True
        return False
def enlarge_roi(roi, padding, frame_width, frame_height):
    """Grow *roi* by *padding* on every side, clipped to the frame bounds."""
    x1, y1, x2, y2 = roi
    grown = (max(x1 - padding, 0),
             max(y1 - padding, 0),
             min(x2 + padding, frame_width - 1),
             min(y2 + padding, frame_height - 1))
    return grown
def clamp_roi(roi, frame_width, frame_height):
    """Clip each ROI coordinate into the frame's valid pixel range."""
    x1, y1, x2, y2 = roi
    x_hi, y_hi = frame_width - 1, frame_height - 1
    return (clamp(x1, 0, x_hi), clamp(y1, 0, y_hi),
            clamp(x2, 0, x_hi), clamp(y2, 0, y_hi))
def iou_area(a, b):
    """Intersection-over-union of two boxes, clamped into [0, 1].

    Boxes may be dlib rectangles or (x1, y1, x2, y2) tuples; areas are
    computed with inclusive pixel extents (+1).
    """
    a = drectangle_to_tuple(a)
    b = drectangle_to_tuple(b)
    # intersection rectangle
    ix1 = np.maximum(a[0], b[0])
    iy1 = np.maximum(a[1], b[1])
    ix2 = np.minimum(a[2], b[2])
    iy2 = np.minimum(a[3], b[3])
    iw = np.maximum(ix2 - ix1 + 1., 0.)
    ih = np.maximum(iy2 - iy1 + 1., 0.)
    inters = iw * ih
    area_a = (a[2] - a[0] + 1.) * (a[3] - a[1] + 1.)
    area_b = (b[2] - b[0] + 1.) * (b[3] - b[1] + 1.)
    uni = area_a + area_b - inters
    return clamp(1.0 * inters / uni, 0, 1)
def enlarge_drectangles(sm_dets, enlarge_ratio):
    """Scale detection boxes up by *enlarge_ratio* (inverse of a downsample).

    Accepts dlib.rectangles, a single dlib.rectangle, or an (x1, y1, x2, y2)
    tuple; raises TypeError for anything else.
    """
    if isinstance(sm_dets, dlib.rectangles):
        dets = dlib.rectangles()
        for sm_det in sm_dets:
            dets.append(dlib.rectangle(
                int(sm_det.left() * enlarge_ratio),
                int(sm_det.top() * enlarge_ratio),
                int(sm_det.right() * enlarge_ratio),
                int(sm_det.bottom() * enlarge_ratio),
            ))
        return dets
    elif isinstance(sm_dets, dlib.rectangle):
        # BUG FIX: this branch referenced the undefined name `sm_det`
        # (loop variable of the branch above), so a single rectangle
        # input always raised NameError.
        return dlib.rectangle(
            int(sm_dets.left() * enlarge_ratio),
            int(sm_dets.top() * enlarge_ratio),
            int(sm_dets.right() * enlarge_ratio),
            int(sm_dets.bottom() * enlarge_ratio))
    elif isinstance(sm_dets, tuple) and len(sm_dets) == 4:
        return (sm_dets[0] * enlarge_ratio,
                sm_dets[1] * enlarge_ratio,
                sm_dets[2] * enlarge_ratio,
                sm_dets[3] * enlarge_ratio)
    else:
        raise TypeError(
            'sm_dets needs to be type dlib.drectangles or dlib.rectangle. but is {}'.format(type(sm_dets)))
def downsample(rgb_img, shrink_ratio):
    """Shrink the image by *shrink_ratio* along both axes."""
    scale = 1.0 / shrink_ratio
    return cv2.resize(rgb_img, None, fx=scale, fy=scale)
| [
"numpy.minimum",
"numpy.maximum",
"cv2.putText",
"cv2.cvtColor",
"cv2.imwrite",
"camShift.meanshiftTracker",
"ioutil.getLogger",
"dlib.rectangles",
"json.dumps",
"cv2.rectangle",
"base64.b64encode",
"cv2.imencode",
"dlib.correlation_tracker",
"dlib.rectangle",
"cv2.resize",
"cv2.Laplac... | [((201, 227), 'ioutil.getLogger', 'ioutil.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import ioutil\n'), ((540, 566), 'dlib.correlation_tracker', 'dlib.correlation_tracker', ([], {}), '()\n', (564, 566), False, 'import dlib\n'), ((2345, 2372), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (2357, 2372), False, 'import cv2\n'), ((2391, 2417), 'base64.b64encode', 'base64.b64encode', (['jpeg_img'], {}), '(jpeg_img)\n', (2407, 2417), False, 'import base64\n'), ((2526, 2555), 'base64.b64encode', 'base64.b64encode', (['frame_bytes'], {}), '(frame_bytes)\n', (2542, 2555), False, 'import base64\n'), ((2808, 2846), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (2820, 2846), False, 'import cv2\n'), ((2931, 2955), 'cv2.imwrite', 'cv2.imwrite', (['path', 'frame'], {}), '(path, frame)\n', (2942, 2955), False, 'import cv2\n'), ((8503, 8525), 'numpy.maximum', 'np.maximum', (['a[0]', 'b[0]'], {}), '(a[0], b[0])\n', (8513, 8525), True, 'import numpy as np\n'), ((8538, 8560), 'numpy.maximum', 'np.maximum', (['a[1]', 'b[1]'], {}), '(a[1], b[1])\n', (8548, 8560), True, 'import numpy as np\n'), ((8573, 8595), 'numpy.minimum', 'np.minimum', (['a[2]', 'b[2]'], {}), '(a[2], b[2])\n', (8583, 8595), True, 'import numpy as np\n'), ((8608, 8630), 'numpy.minimum', 'np.minimum', (['a[3]', 'b[3]'], {}), '(a[3], b[3])\n', (8618, 8630), True, 'import numpy as np\n'), ((8640, 8676), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin + 1.0)', '(0.0)'], {}), '(ixmax - ixmin + 1.0, 0.0)\n', (8650, 8676), True, 'import numpy as np\n'), ((8684, 8720), 'numpy.maximum', 'np.maximum', (['(iymax - iymin + 1.0)', '(0.0)'], {}), '(iymax - iymin + 1.0, 0.0)\n', (8694, 8720), True, 'import numpy as np\n'), ((10098, 10169), 'cv2.resize', 'cv2.resize', (['rgb_img', 'None'], {'fx': '(1.0 / shrink_ratio)', 'fy': '(1.0 / shrink_ratio)'}), '(rgb_img, None, fx=1.0 / shrink_ratio, fy=1.0 / 
shrink_ratio)\n', (10108, 10169), False, 'import cv2\n'), ((665, 711), 'dlib.rectangle', 'dlib.rectangle', (['roi_x1', 'roi_y1', 'roi_x2', 'roi_y2'], {}), '(roi_x1, roi_y1, roi_x2, roi_y2)\n', (679, 711), False, 'import dlib\n'), ((944, 970), 'dlib.correlation_tracker', 'dlib.correlation_tracker', ([], {}), '()\n', (968, 970), False, 'import dlib\n'), ((999, 1017), 'camShift.meanshiftTracker', 'meanshiftTracker', ([], {}), '()\n', (1015, 1017), False, 'from camShift import camshiftTracker, meanshiftTracker\n'), ((1921, 1967), 'dlib.rectangle', 'dlib.rectangle', (['roi_x1', 'roi_y1', 'roi_x2', 'roi_y2'], {}), '(roi_x1, roi_y1, roi_x2, roi_y2)\n', (1935, 1967), False, 'import dlib\n'), ((3188, 3239), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(255, 0, 0)'], {}), '(img, (x1, y1), (x2, y2), (255, 0, 0))\n', (3201, 3239), False, 'import cv2\n'), ((5279, 5320), 'cv2.cvtColor', 'cv2.cvtColor', (['bgr_img', 'cv2.COLOR_BGR2GRAY'], {}), '(bgr_img, cv2.COLOR_BGR2GRAY)\n', (5291, 5320), False, 'import cv2\n'), ((6743, 6758), 'json.dumps', 'json.dumps', (['msg'], {}), '(msg)\n', (6753, 6758), False, 'import json\n'), ((9045, 9062), 'dlib.rectangles', 'dlib.rectangles', ([], {}), '()\n', (9060, 9062), False, 'import dlib\n'), ((3083, 3155), 'cv2.putText', 'cv2.putText', (['img', 'hint', '(x1, y1)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 0, 0)'], {}), '(img, hint, (x1, y1), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n', (3094, 3155), False, 'import cv2\n'), ((5369, 5404), 'cv2.Laplacian', 'cv2.Laplacian', (['grey_img', 'cv2.CV_64F'], {}), '(grey_img, cv2.CV_64F)\n', (5382, 5404), False, 'import cv2\n')] |
import numpy as np
from tqdm import tqdm
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
import torchvision.models
# Training configuration: a full-size run on GPU machines, and a tiny
# smoke-test configuration (1 epoch, batch of 5) on CPU-only machines.
use_cuda = torch.cuda.is_available()
if use_cuda:
    epochs = 100
    batch_size = 40
else:
    epochs = 1
    batch_size = 5
def get_loaders():
    """Build DataLoaders for the train and validation splits.

    Reads 'train.npz' and 'val.npz' from the working directory; each archive
    holds 'xs' (N, H, W, C) images and 'ys' integer class labels (0 = cat,
    1 = dog, per the printed summary).  Images are transposed to the
    channel-first (N, C, H, W) layout expected by PyTorch.
    """
    train_npz = np.load('train.npz')
    x_train, y_train = train_npz['xs'], train_npz['ys']
    val_npz = np.load('val.npz')
    x_val, y_val = val_npz['xs'], val_npz['ys']

    # Per-class counts for a quick balance check of both splits.
    for split, labels in (('Train', y_train), ('Val', y_val)):
        print('# Cats in {}:'.format(split), np.sum(labels == 0))
        print('# Dogs in {}:'.format(split), np.sum(labels == 1))

    def _as_dataset(xs, ys):
        # HWC -> CHW, then wrap as float inputs / long targets.
        xs = np.transpose(xs, [0, 3, 1, 2])
        return TensorDataset(torch.from_numpy(xs).float(),
                             torch.from_numpy(ys).long())

    train_loader = DataLoader(_as_dataset(x_train, y_train),
                              batch_size=batch_size, shuffle=False)
    val_loader = DataLoader(_as_dataset(x_val, y_val),
                            batch_size=batch_size, shuffle=False)
    return train_loader, val_loader
class Net(nn.Module):
    """Small CNN for binary (cat/dog) classification.

    The classifier head expects 1156 = 4 * 17 * 17 flattened features, which
    fixes the input spatial size at 224 x 224 (3-channel images):
    224 -conv5/s2-> 110 -conv3/s2-> 54 -conv3/s1-> 52 -maxpool3-> 17.

    NOTE(review): ``main()`` feeds this Softmax output into
    ``nn.CrossEntropyLoss``, which applies log-softmax again; returning raw
    logits would be the conventional pairing — confirm before changing.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 3 -> 5 -> 7 -> 4 channels.
        self.conv = nn.Sequential(
            nn.BatchNorm2d(3),
            nn.Conv2d(3, 5, kernel_size=5, stride=2),
            nn.ReLU(),
            nn.Conv2d(5, 7, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(7, 4, kernel_size=3, stride=1),
            nn.BatchNorm2d(4), nn.ReLU(), nn.MaxPool2d(3))
        # Classifier head: 1156 flattened features -> 2 class probabilities.
        # Softmax needs an explicit dim; dim=1 normalizes over classes.
        self.fc = nn.Sequential(
            nn.Linear(1156, 16), nn.Linear(16, 2), nn.Softmax(dim=1))

    def forward(self, x):
        x = self.conv(x)
        # Flatten per sample.  Use the actual batch size instead of the
        # module-level ``batch_size`` global so partial final batches (or any
        # caller-chosen batch size) do not crash.
        x = x.view(x.size(0), -1)
        return self.fc(x)
def main():
    """Train on the cat/dog loaders and evaluate once per epoch.

    Relies on the module-level globals ``use_cuda``, ``epochs`` and
    ``batch_size`` together with ``get_loaders()`` and ``Net``/VGG-16.
    """
    train, val = get_loaders()
    # Pretrained VGG-16 when a GPU is available, the small custom Net otherwise.
    model = torchvision.models.vgg16(pretrained=True) if use_cuda else Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    if use_cuda:
        # model = nn.DataParallel(model)
        model = model.cuda()

    for epoch in range(epochs):
        tqdm_arg = {
            'desc': 'Epoch {}/{}'.format(epoch, epochs),
            'total': len(train),
            'ascii': True,
        }
        pbar_postfix = dict()
        pbar = tqdm(**tqdm_arg)

        sum_corr = 0.0
        avg_corr = 0.0
        model.train()
        for i, (x, y) in enumerate(train):
            if use_cuda:
                x = x.cuda()
                y = y.cuda()
            x_var = Variable(x, requires_grad=True)
            y_var = Variable(y)

            out = model(x_var)
            loss = criterion(out, y_var)
            pred = out.data.max(1)[1]

            # Running accuracy over the batches seen so far this epoch.
            # (The original code had a leftover debug ``print``/``break``
            # here that aborted every epoch after the first batch, before
            # the backward pass and optimizer step ever ran.)
            cnt_corr = (pred == y).sum()
            sum_corr += float(cnt_corr)
            avg_corr = sum_corr / ((i + 1) * batch_size)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # ``loss`` is a 0-dim tensor on PyTorch >= 0.4; ``.item()``
            # extracts the Python float (``loss.data[0]`` raises there).
            pbar_postfix['loss'] = '{:.03f}'.format(loss.item())
            pbar_postfix['acc'] = '{:.03f}'.format(avg_corr)
            pbar.set_postfix(**pbar_postfix)
            pbar.update(1)

        model.eval()
        for (x, y) in val:
            if use_cuda:
                x = x.cuda()
                y = y.cuda()
            x_var = Variable(x)
            y_var = Variable(y)
            pred = model(x_var)
            loss = criterion(pred, y_var)
            pbar_postfix['val_loss'] = '{:.03f}'.format(loss.item())
            pbar.set_postfix(**pbar_postfix)
            pbar.refresh()
        pbar.close()
# Entry point: only run training when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"numpy.load",
"tqdm.tqdm",
"numpy.sum",
"torch.nn.ReLU",
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"numpy.transpose",
"torch.nn.CrossEntropyLoss",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.nn.Softmax",
"to... | [((285, 310), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (308, 310), False, 'import torch\n'), ((434, 454), 'numpy.load', 'np.load', (['"""train.npz"""'], {}), "('train.npz')\n", (441, 454), True, 'import numpy as np\n'), ((513, 531), 'numpy.load', 'np.load', (['"""val.npz"""'], {}), "('val.npz')\n", (520, 531), True, 'import numpy as np\n'), ((788, 823), 'numpy.transpose', 'np.transpose', (['x_train', '[0, 3, 1, 2]'], {}), '(x_train, [0, 3, 1, 2])\n', (800, 823), True, 'import numpy as np\n'), ((836, 869), 'numpy.transpose', 'np.transpose', (['x_val', '[0, 3, 1, 2]'], {}), '(x_val, [0, 3, 1, 2])\n', (848, 869), True, 'import numpy as np\n'), ((1074, 1105), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (1087, 1105), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1124, 1151), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_val', 'y_val'], {}), '(x_val, y_val)\n', (1137, 1151), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1171, 1234), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(train_dataset, batch_size=batch_size, shuffle=False)\n', (1181, 1234), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1261, 1322), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=batch_size, shuffle=False)\n', (1271, 1322), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((2156, 2177), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2175, 2177), True, 'import torch.nn as nn\n'), ((603, 623), 'numpy.sum', 'np.sum', (['(y_train == 0)'], {}), '(y_train == 0)\n', (609, 623), True, 'import numpy as np\n'), ((655, 675), 'numpy.sum', 'np.sum', (['(y_train == 1)'], {}), '(y_train == 1)\n', (661, 675), True, 'import 
numpy as np\n'), ((705, 723), 'numpy.sum', 'np.sum', (['(y_val == 0)'], {}), '(y_val == 0)\n', (711, 723), True, 'import numpy as np\n'), ((753, 771), 'numpy.sum', 'np.sum', (['(y_val == 1)'], {}), '(y_val == 1)\n', (759, 771), True, 'import numpy as np\n'), ((2562, 2578), 'tqdm.tqdm', 'tqdm', ([], {}), '(**tqdm_arg)\n', (2566, 2578), False, 'from tqdm import tqdm\n'), ((885, 910), 'torch.from_numpy', 'torch.from_numpy', (['x_train'], {}), '(x_train)\n', (901, 910), False, 'import torch\n'), ((933, 958), 'torch.from_numpy', 'torch.from_numpy', (['y_train'], {}), '(y_train)\n', (949, 958), False, 'import torch\n'), ((978, 1001), 'torch.from_numpy', 'torch.from_numpy', (['x_val'], {}), '(x_val)\n', (994, 1001), False, 'import torch\n'), ((1022, 1045), 'torch.from_numpy', 'torch.from_numpy', (['y_val'], {}), '(y_val)\n', (1038, 1045), False, 'import torch\n'), ((1490, 1507), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {}), '(3)\n', (1504, 1507), True, 'import torch.nn as nn\n'), ((1521, 1561), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(5)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(3, 5, kernel_size=5, stride=2)\n', (1530, 1561), True, 'import torch.nn as nn\n'), ((1575, 1584), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1582, 1584), True, 'import torch.nn as nn\n'), ((1598, 1638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(5)', '(7)'], {'kernel_size': '(3)', 'stride': '(2)'}), '(5, 7, kernel_size=3, stride=2)\n', (1607, 1638), True, 'import torch.nn as nn\n'), ((1652, 1661), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1659, 1661), True, 'import torch.nn as nn\n'), ((1675, 1715), 'torch.nn.Conv2d', 'nn.Conv2d', (['(7)', '(4)'], {'kernel_size': '(3)', 'stride': '(1)'}), '(7, 4, kernel_size=3, stride=1)\n', (1684, 1715), True, 'import torch.nn as nn\n'), ((1729, 1746), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4)'], {}), '(4)\n', (1743, 1746), True, 'import torch.nn as nn\n'), ((1748, 1757), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1755, 1757), True, 
'import torch.nn as nn\n'), ((1759, 1774), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)'], {}), '(3)\n', (1771, 1774), True, 'import torch.nn as nn\n'), ((1821, 1840), 'torch.nn.Linear', 'nn.Linear', (['(1156)', '(16)'], {}), '(1156, 16)\n', (1830, 1840), True, 'import torch.nn as nn\n'), ((1842, 1858), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(2)'], {}), '(16, 2)\n', (1851, 1858), True, 'import torch.nn as nn\n'), ((1860, 1872), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (1870, 1872), True, 'import torch.nn as nn\n'), ((2796, 2827), 'torch.autograd.Variable', 'Variable', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (2804, 2827), False, 'from torch.autograd import Variable\n'), ((2848, 2859), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (2856, 2859), False, 'from torch.autograd import Variable\n'), ((3981, 3992), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (3989, 3992), False, 'from torch.autograd import Variable\n'), ((4013, 4024), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (4021, 4024), False, 'from torch.autograd import Variable\n')] |
from math import cos, sin, pi, exp, sqrt
import numpy as np
# Per-element placement outputs, filled by the loop below.
# NOTE(review): this fragment relies on names defined earlier in the script
# (nb, angle_shift, shift_factor, spiral_type, center, base_radius,
# radius_shift, base_scale, scale_shift, base_rotation, rotation_shift)
# which are not visible here -- confirm against the full file.
positions = []
rotations = []
scales = []
# Per-axis shift factors; converted to an array for elementwise arithmetic.
shift_factor = np.array(shift_factor)
# Supported spiral equations; anything else falls back to 'archimedean'.
spiral_types = ['archimedean', 'hyperbolic', 'fermat', 'lituus', 'log']
if spiral_type not in spiral_types:
    spiral_type = 'archimedean'
# iterate through each element
for i in range(nb):
    # angle increases constantly for each new element
    angle = angle_shift * i # angle is in radians
    # transformation shift
    # r(theta) evaluated with theta expressed in turns (angle / 2*pi).
    if spiral_type == 'archimedean':
        trans_shift = shift_factor * (angle/(2*pi))
    elif spiral_type == 'log':
        trans_shift = shift_factor * exp(angle/(2*pi))
    # NOTE(review): on the first iteration angle == 0, so the 'hyperbolic'
    # and 'lituus' branches divide by zero here -- confirm this is intended.
    elif spiral_type == 'hyperbolic':
        trans_shift = shift_factor / (angle/(2*pi))
    elif spiral_type == 'fermat':
        trans_shift = shift_factor * sqrt(angle/(2*pi))
    elif spiral_type == 'lituus':
        trans_shift = shift_factor / sqrt(angle/(2*pi))
    # radius, scale and rotation
    # add to starting value the given shift proportional to the angle
    radius = np.array(base_radius) + (radius_shift * trans_shift[0])
    scale = np.array(base_scale) + (scale_shift * trans_shift[1])
    rotation = np.array(base_rotation) + (rotation_shift * trans_shift[2])
    # find position based on radius and angle
    x = center[0] + radius[0] * cos(angle)
    y = center[1] + radius[1] * sin(angle)
    z = center[2] - radius[2]
    positions.append((x, y, z))
    # add rotation on z such that element always points outward
    rotations.append((rotation[0], rotation[1], rotation[2]+ angle%(2*pi)))
    scales.append(list(scale))
"math.exp",
"math.sqrt",
"math.sin",
"numpy.array",
"math.cos"
] | [((118, 140), 'numpy.array', 'np.array', (['shift_factor'], {}), '(shift_factor)\n', (126, 140), True, 'import numpy as np\n'), ((1039, 1060), 'numpy.array', 'np.array', (['base_radius'], {}), '(base_radius)\n', (1047, 1060), True, 'import numpy as np\n'), ((1107, 1127), 'numpy.array', 'np.array', (['base_scale'], {}), '(base_scale)\n', (1115, 1127), True, 'import numpy as np\n'), ((1176, 1199), 'numpy.array', 'np.array', (['base_rotation'], {}), '(base_rotation)\n', (1184, 1199), True, 'import numpy as np\n'), ((1319, 1329), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (1322, 1329), False, 'from math import cos, sin, pi, exp, sqrt\n'), ((1362, 1372), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (1365, 1372), False, 'from math import cos, sin, pi, exp, sqrt\n'), ((629, 650), 'math.exp', 'exp', (['(angle / (2 * pi))'], {}), '(angle / (2 * pi))\n', (632, 650), False, 'from math import cos, sin, pi, exp, sqrt\n'), ((808, 830), 'math.sqrt', 'sqrt', (['(angle / (2 * pi))'], {}), '(angle / (2 * pi))\n', (812, 830), False, 'from math import cos, sin, pi, exp, sqrt\n'), ((898, 920), 'math.sqrt', 'sqrt', (['(angle / (2 * pi))'], {}), '(angle / (2 * pi))\n', (902, 920), False, 'from math import cos, sin, pi, exp, sqrt\n')] |
import argparse
import cv2
import numpy as np
import os
import sys
import torch
from utils.model_opr import load_model
from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr
def get_network(model_path):
    """Select and instantiate the network matching ``model_path``.

    The experiment package under ``exps`` is chosen from substrings of the
    model path: 'REDS'/'Vimeo' select the MuCAN variants; otherwise one of
    the LAPAR_{A,B,C}_x{2,3,4} names must appear in the path.

    Parameters
    ----------
    model_path : str
        Path of the pretrained model file.

    Returns
    -------
    tuple
        ``(config, network)`` -- the experiment's ``config`` object and an
        instantiated ``Network(config)``.

    Exits the process with status 1 when no known experiment matches.
    """
    import importlib

    if 'REDS' in model_path:
        exp_name = 'MuCAN_REDS'
    elif 'Vimeo' in model_path:
        exp_name = 'MuCAN_Vimeo90K'
    else:
        # Same priority order as the original if/elif chain: A_x2 .. C_x4.
        lapar_names = ['LAPAR_{:s}_x{:d}'.format(v, s)
                       for v in 'ABC' for s in (2, 3, 4)]
        exp_name = next((n for n in lapar_names if n in model_path), None)
        if exp_name is None:
            print('Illegal model: not implemented!')  # typo fixed: 'Illenal'
            sys.exit(1)

    config = importlib.import_module('exps.{:s}.config'.format(exp_name)).config
    Network = importlib.import_module('exps.{:s}.network'.format(exp_name)).Network

    # an ugly operation: rewrite the kernel path so it resolves from the
    # repo root rather than the experiment directory.
    if 'KERNEL_PATH' in config.MODEL:
        config.MODEL.KERNEL_PATH = config.MODEL.KERNEL_PATH.replace('../', '')
    return config, Network(config)
if __name__ == '__main__':
    # Command-line super-resolution inference for single images (SISR) or
    # video frame sequences (VSR), with optional PSNR/SSIM evaluation
    # against ground-truth frames.
    parser = argparse.ArgumentParser()
    parser.add_argument('--sr_type', type=str, default='SISR')
    parser.add_argument('--model_path', type=str, default=None)
    parser.add_argument('--input_path', type=str, default=None)
    parser.add_argument('--output_path', type=str, default=None)
    parser.add_argument('--gt_path', type=str, default=None)
    args = parser.parse_args()

    if args.output_path and not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    print('Loading Network ...')
    config, model = get_network(args.model_path)
    device = torch.device('cuda')
    model = model.to(device)
    load_model(model, args.model_path, strict=True)
    # Input sizes must be divisible by `down`; `scale` is the SR up-factor.
    down = config.MODEL.DOWN
    scale = config.MODEL.SCALE

    print('Reading Images ...')
    ipath_l = []
    for f in sorted(os.listdir(args.input_path)):
        if f.endswith('png') or f.endswith('jpg'):
            ipath_l.append(os.path.join(args.input_path, f))
    if args.gt_path:
        gpath_l = []
        for f in sorted(os.listdir(args.gt_path)):
            if f.endswith('png') or f.endswith('jpg'):
                gpath_l.append(os.path.join(args.gt_path, f))

    psnr_l = []
    ssim_l = []
    if args.sr_type == 'SISR':
        with torch.no_grad():
            for i, f in enumerate(ipath_l):
                img_name = f.split('/')[-1]
                print('Processing: %s' % img_name)
                # BGR uint8 -> RGB float CHW in [0, 1], with a batch dim.
                lr_img = cv2.imread(f, cv2.IMREAD_COLOR)
                lr_img = np.transpose(lr_img[:, :, ::-1], (2, 0, 1)).astype(np.float32) / 255.0
                lr_img = torch.from_numpy(lr_img).float().to(device).unsqueeze(0)

                _, C, H, W = lr_img.size()
                # Replicate-pad so H and W become multiples of `down`.
                need_pad = False
                if H % down != 0 or W % down != 0:
                    need_pad = True
                    pad_y_t = (down - H % down) % down // 2
                    pad_y_b = (down - H % down) % down - pad_y_t
                    pad_x_l = (down - W % down) % down // 2
                    pad_x_r = (down - W % down) % down - pad_x_l
                    lr_img = torch.nn.functional.pad(lr_img, pad=(pad_x_l, pad_x_r, pad_y_t, pad_y_b), mode='replicate')

                output = model(lr_img)
                if need_pad:
                    # Crop the upscaled padding back off the output.
                    y_end = -pad_y_b * scale if pad_y_b != 0 else output.size(2)
                    x_end = -pad_x_r * scale if pad_x_r != 0 else output.size(3)
                    output = output[:, :, pad_y_t * scale: y_end, pad_x_l * scale: x_end]

                output = tensor2img(output)
                if args.output_path:
                    output_path = os.path.join(args.output_path, img_name)
                    cv2.imwrite(output_path, output)

                if args.gt_path:
                    output = output.astype(np.float32) / 255.0
                    gt = cv2.imread(gpath_l[i], cv2.IMREAD_COLOR).astype(np.float32) / 255.0
                    # to y channel
                    output = bgr2ycbcr(output, only_y=True)
                    gt = bgr2ycbcr(gt, only_y=True)
                    output = output[scale:-scale, scale:-scale]
                    #gt = gt[scale:-scale, scale:-scale]
                    #psnr = calculate_psnr(output * 255, gt * 255)
                    #ssim = calculate_ssim(output * 255, gt * 255)
                    #psnr_l.append(psnr)
                    #ssim_l.append(ssim)
    elif args.sr_type == 'VSR':
        num_img = len(ipath_l)
        half_n = config.MODEL.N_FRAME // 2
        with torch.no_grad():
            for i, f in enumerate(ipath_l):
                img_name = f.split('/')[-1]
                print('Processing: %s' % img_name)
                # Gather the temporal window around frame i, reflecting
                # indices at the sequence boundaries.
                nbr_l = []
                for j in range(i - half_n, i + half_n + 1):
                    if j < 0:
                        ipath = ipath_l[i + half_n - j]
                    elif j >= num_img:
                        ipath = ipath_l[i - half_n - (j - num_img + 1)]
                    else:
                        ipath = ipath_l[j]
                    nbr_img = cv2.imread(ipath, cv2.IMREAD_COLOR)
                    nbr_l.append(nbr_img)
                # (N, H, W, C) BGR uint8 -> (N, C, H, W) RGB float in [0, 1].
                lr_imgs = np.stack(nbr_l, axis=0)
                lr_imgs = np.transpose(lr_imgs[:, :, :, ::-1], (0, 3, 1, 2)).astype(np.float32) / 255.0
                lr_imgs = torch.from_numpy(lr_imgs).float().to(device)

                N, C, H, W = lr_imgs.size()
                need_pad = False
                if H % down != 0 or W % down != 0:
                    need_pad = True
                    pad_y_t = (down - H % down) % down // 2
                    pad_y_b = (down - H % down) % down - pad_y_t
                    pad_x_l = (down - W % down) % down // 2
                    pad_x_r = (down - W % down) % down - pad_x_l
                    lr_imgs = torch.nn.functional.pad(lr_imgs, pad=(pad_x_l, pad_x_r, pad_y_t, pad_y_b), mode='replicate')

                lr_imgs = lr_imgs.unsqueeze(0)
                output = model(lr_imgs)
                if need_pad:
                    y_end = -pad_y_b * scale if pad_y_b != 0 else output.size(2)
                    x_end = -pad_x_r * scale if pad_x_r != 0 else output.size(3)
                    output = output[:, :, pad_y_t * scale: y_end, pad_x_l * scale: x_end]

                output = tensor2img(output)
                if args.output_path:
                    output_path = os.path.join(args.output_path, img_name)
                    cv2.imwrite(output_path, output)

                if args.gt_path:
                    output = output.astype(np.float32) / 255.0
                    gt = cv2.imread(gpath_l[i], cv2.IMREAD_COLOR).astype(np.float32) / 255.0
                    # to y channel
                    output = bgr2ycbcr(output, only_y=True)
                    gt = bgr2ycbcr(gt, only_y=True)
                    # Drop a `scale`-pixel border before computing metrics.
                    output = output[scale:-scale, scale:-scale]
                    gt = gt[scale:-scale, scale:-scale]
                    psnr = calculate_psnr(output * 255, gt * 255)
                    ssim = calculate_ssim(output * 255, gt * 255)
                    psnr_l.append(psnr)
                    ssim_l.append(ssim)
    else:
        print('Illegal SR type: not implemented!')  # typo fixed: 'Illenal'
        sys.exit(1)

    # if args.gt_path:
    #     avg_psnr = sum(psnr_l) / len(psnr_l)
    #     avg_ssim = sum(ssim_l) / len(ssim_l)
    #     print('--------- Result ---------')
    #     print('PSNR: %.2f, SSIM:%.4f' % (avg_psnr, avg_ssim))
    print('Finished!')
| [
"argparse.ArgumentParser",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.nn.functional.pad",
"utils.common.tensor2img",
"cv2.imwrite",
"os.path.exists",
"numpy.transpose",
"exps.LAPAR_C_x4.config.config.MODEL.KERNEL_PATH.replace",
"numpy.stack",
"exps.LAPAR_C_x4.network.Network",
"... | [((2054, 2079), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2077, 2079), False, 'import argparse\n'), ((2629, 2649), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2641, 2649), False, 'import torch\n'), ((2683, 2730), 'utils.model_opr.load_model', 'load_model', (['model', 'args.model_path'], {'strict': '(True)'}), '(model, args.model_path, strict=True)\n', (2693, 2730), False, 'from utils.model_opr import load_model\n'), ((1932, 1975), 'exps.LAPAR_C_x4.config.config.MODEL.KERNEL_PATH.replace', 'config.MODEL.KERNEL_PATH.replace', (['"""../"""', '""""""'], {}), "('../', '')\n", (1964, 1975), False, 'from exps.LAPAR_C_x4.config import config\n'), ((1996, 2011), 'exps.LAPAR_C_x4.network.Network', 'Network', (['config'], {}), '(config)\n', (2003, 2011), False, 'from exps.LAPAR_C_x4.network import Network\n'), ((2503, 2532), 'os.makedirs', 'os.makedirs', (['args.output_path'], {}), '(args.output_path)\n', (2514, 2532), False, 'import os\n'), ((2862, 2889), 'os.listdir', 'os.listdir', (['args.input_path'], {}), '(args.input_path)\n', (2872, 2889), False, 'import os\n'), ((2461, 2493), 'os.path.exists', 'os.path.exists', (['args.output_path'], {}), '(args.output_path)\n', (2475, 2493), False, 'import os\n'), ((3071, 3095), 'os.listdir', 'os.listdir', (['args.gt_path'], {}), '(args.gt_path)\n', (3081, 3095), False, 'import os\n'), ((3300, 3315), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3313, 3315), False, 'import torch\n'), ((8262, 8273), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8270, 8273), False, 'import sys\n'), ((2970, 3002), 'os.path.join', 'os.path.join', (['args.input_path', 'f'], {}), '(args.input_path, f)\n', (2982, 3002), False, 'import os\n'), ((3481, 3512), 'cv2.imread', 'cv2.imread', (['f', 'cv2.IMREAD_COLOR'], {}), '(f, cv2.IMREAD_COLOR)\n', (3491, 3512), False, 'import cv2\n'), ((4575, 4593), 'utils.common.tensor2img', 'tensor2img', (['output'], {}), '(output)\n', (4585, 4593), 
False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((5558, 5573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5571, 5573), False, 'import torch\n'), ((3184, 3213), 'os.path.join', 'os.path.join', (['args.gt_path', 'f'], {}), '(args.gt_path, f)\n', (3196, 3213), False, 'import os\n'), ((4135, 4230), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['lr_img'], {'pad': '(pad_x_l, pad_x_r, pad_y_t, pad_y_b)', 'mode': '"""replicate"""'}), "(lr_img, pad=(pad_x_l, pad_x_r, pad_y_t, pad_y_b),\n mode='replicate')\n", (4158, 4230), False, 'import torch\n'), ((4665, 4705), 'os.path.join', 'os.path.join', (['args.output_path', 'img_name'], {}), '(args.output_path, img_name)\n', (4677, 4705), False, 'import os\n'), ((4726, 4758), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'output'], {}), '(output_path, output)\n', (4737, 4758), False, 'import cv2\n'), ((5014, 5044), 'utils.common.bgr2ycbcr', 'bgr2ycbcr', (['output'], {'only_y': '(True)'}), '(output, only_y=True)\n', (5023, 5044), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((5070, 5096), 'utils.common.bgr2ycbcr', 'bgr2ycbcr', (['gt'], {'only_y': '(True)'}), '(gt, only_y=True)\n', (5079, 5096), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((6201, 6224), 'numpy.stack', 'np.stack', (['nbr_l'], {'axis': '(0)'}), '(nbr_l, axis=0)\n', (6209, 6224), True, 'import numpy as np\n'), ((7335, 7353), 'utils.common.tensor2img', 'tensor2img', (['output'], {}), '(output)\n', (7345, 7353), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((6097, 6132), 'cv2.imread', 'cv2.imread', (['ipath', 'cv2.IMREAD_COLOR'], {}), '(ipath, cv2.IMREAD_COLOR)\n', (6107, 6132), False, 'import cv2\n'), ((6846, 6942), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['lr_imgs'], {'pad': '(pad_x_l, pad_x_r, pad_y_t, pad_y_b)', 'mode': '"""replicate"""'}), 
"(lr_imgs, pad=(pad_x_l, pad_x_r, pad_y_t, pad_y_b),\n mode='replicate')\n", (6869, 6942), False, 'import torch\n'), ((7425, 7465), 'os.path.join', 'os.path.join', (['args.output_path', 'img_name'], {}), '(args.output_path, img_name)\n', (7437, 7465), False, 'import os\n'), ((7486, 7518), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'output'], {}), '(output_path, output)\n', (7497, 7518), False, 'import cv2\n'), ((7774, 7804), 'utils.common.bgr2ycbcr', 'bgr2ycbcr', (['output'], {'only_y': '(True)'}), '(output, only_y=True)\n', (7783, 7804), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((7830, 7856), 'utils.common.bgr2ycbcr', 'bgr2ycbcr', (['gt'], {'only_y': '(True)'}), '(gt, only_y=True)\n', (7839, 7856), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((8006, 8044), 'utils.common.calculate_psnr', 'calculate_psnr', (['(output * 255)', '(gt * 255)'], {}), '(output * 255, gt * 255)\n', (8020, 8044), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((8072, 8110), 'utils.common.calculate_ssim', 'calculate_ssim', (['(output * 255)', '(gt * 255)'], {}), '(output * 255, gt * 255)\n', (8086, 8110), False, 'from utils.common import tensor2img, calculate_psnr, calculate_ssim, bgr2ycbcr\n'), ((3538, 3581), 'numpy.transpose', 'np.transpose', (['lr_img[:, :, ::-1]', '(2, 0, 1)'], {}), '(lr_img[:, :, ::-1], (2, 0, 1))\n', (3550, 3581), True, 'import numpy as np\n'), ((4881, 4921), 'cv2.imread', 'cv2.imread', (['gpath_l[i]', 'cv2.IMREAD_COLOR'], {}), '(gpath_l[i], cv2.IMREAD_COLOR)\n', (4891, 4921), False, 'import cv2\n'), ((6251, 6301), 'numpy.transpose', 'np.transpose', (['lr_imgs[:, :, :, ::-1]', '(0, 3, 1, 2)'], {}), '(lr_imgs[:, :, :, ::-1], (0, 3, 1, 2))\n', (6263, 6301), True, 'import numpy as np\n'), ((6355, 6380), 'torch.from_numpy', 'torch.from_numpy', (['lr_imgs'], {}), '(lr_imgs)\n', (6371, 6380), False, 'import torch\n'), ((7641, 
7681), 'cv2.imread', 'cv2.imread', (['gpath_l[i]', 'cv2.IMREAD_COLOR'], {}), '(gpath_l[i], cv2.IMREAD_COLOR)\n', (7651, 7681), False, 'import cv2\n'), ((3634, 3658), 'torch.from_numpy', 'torch.from_numpy', (['lr_img'], {}), '(lr_img)\n', (3650, 3658), False, 'import torch\n'), ((1822, 1833), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1830, 1833), False, 'import sys\n')] |
"""
example using distutils
The great thing is that python provides a nice tool called distutils.
Let it do all the hard compiling work for you.
"""
from distutils.core import setup, Extension
import numpy as np
# Locate the NumPy C headers; very old NumPy releases exposed
# get_numpy_include() instead of get_include().
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()

# C++ extension built from the SWIG wrapper plus the implementation file.
module = Extension(
    name='_u_numpy',
    sources=['u_numpy.cpp', 'u_numpy_wrap.cxx'],
    include_dirs=[numpy_include],
)

setup(
    name='u_numpy',
    version='0.1',
    author='eseshinpu',
    description='using numpy',
    ext_modules=[module],
    py_modules=['u_numpy'],
)
| [
"distutils.core.Extension",
"numpy.get_numpy_include",
"numpy.get_include",
"distutils.core.setup"
] | [((333, 438), 'distutils.core.Extension', 'Extension', ([], {'name': '"""_u_numpy"""', 'sources': "['u_numpy.cpp', 'u_numpy_wrap.cxx']", 'include_dirs': '[numpy_include]'}), "(name='_u_numpy', sources=['u_numpy.cpp', 'u_numpy_wrap.cxx'],\n include_dirs=[numpy_include])\n", (342, 438), False, 'from distutils.core import setup, Extension\n'), ((523, 657), 'distutils.core.setup', 'setup', ([], {'name': '"""u_numpy"""', 'version': '"""0.1"""', 'author': '"""eseshinpu"""', 'description': '"""using numpy"""', 'ext_modules': '[module]', 'py_modules': "['u_numpy']"}), "(name='u_numpy', version='0.1', author='eseshinpu', description=\n 'using numpy', ext_modules=[module], py_modules=['u_numpy'])\n", (528, 657), False, 'from distutils.core import setup, Extension\n'), ((240, 256), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (254, 256), True, 'import numpy as np\n'), ((300, 322), 'numpy.get_numpy_include', 'np.get_numpy_include', ([], {}), '()\n', (320, 322), True, 'import numpy as np\n')] |
"""
Spherical Harmonic Coefficient and Grid classes
"""
import numpy as _np
import matplotlib as _mpl
import matplotlib.pyplot as _plt
from mpl_toolkits.axes_grid1 import make_axes_locatable as _make_axes_locatable
import copy as _copy
import warnings as _warnings
from scipy.special import factorial as _factorial
import xarray as _xr
from .. import shtools as _shtools
from ..spectralanalysis import spectrum as _spectrum
from ..spectralanalysis import cross_spectrum as _cross_spectrum
from ..shio import convert as _convert
from ..shio import shread as _shread
try:
import cartopy.crs as _ccrs
from cartopy.mpl.ticker import LongitudeFormatter as _LongitudeFormatter
from cartopy.mpl.ticker import LatitudeFormatter as _LatitudeFormatter
_cartopy_module = True
except ModuleNotFoundError:
_cartopy_module = False
try:
import pygmt as _pygmt
_pygmt_module = True
except ModuleNotFoundError:
_pygmt_module = False
# =============================================================================
# ========= COEFFICIENT CLASSES =========================================
# =============================================================================
class SHCoeffs(object):
"""
Spherical Harmonics Coefficient class.
The coefficients of this class can be initialized using one of the four
constructor methods:
x = SHCoeffs.from_array(array)
x = SHCoeffs.from_random(powerspectrum)
x = SHCoeffs.from_zeros(lmax)
x = SHCoeffs.from_file('fname.dat')
x = SHCoeffs.from_netcdf('ncname.nc')
x = SHCoeffs.from_cap(theta, lmax)
    The normalization convention of the input coefficients is specified
by the normalization and csphase parameters, which take the following
values:
normalization : '4pi' (default), geodesy 4-pi normalized.
: 'ortho', orthonormalized.
: 'schmidt', Schmidt semi-normalized.
: 'unnorm', unnormalized.
    csphase : 1 (default), exclude the Condon-Shortley phase factor.
: -1, include the Condon-Shortley phase factor.
See the documentation for each constructor method for further options.
Once initialized, each class instance defines the following class
attributes:
lmax : The maximum spherical harmonic degree of the coefficients.
coeffs : The raw coefficients with the specified normalization and
csphase conventions.
normalization : The normalization of the coefficients: '4pi', 'ortho',
'schmidt', or 'unnorm'.
csphase : Defines whether the Condon-Shortley phase is used (1)
or not (-1).
mask : A boolean mask that is True for the permissible values of
degree l and order m.
kind : The coefficient data type: either 'complex' or 'real'.
header : A list of values (of type str) from the header line of the
input file used to initialize the class (for 'shtools'
formatted files).
Each class instance provides the following methods:
degrees() : Return an array listing the spherical harmonic
degrees from 0 to lmax.
spectrum() : Return the spectrum of the function as a function
of spherical harmonic degree.
cross_spectrum() : Return the cross-spectrum of two functions as a
function of spherical harmonic degree.
volume() : Calculate the volume of the body.
centroid() : Compute the centroid of the body.
set_coeffs() : Set coefficients in-place to specified values.
rotate() : Rotate the coordinate system used to express the
spherical harmonic coefficients and return a new
class instance.
convert() : Return a new class instance using a different
normalization convention.
pad() : Return a new class instance that is zero padded or
truncated to a different lmax.
expand() : Evaluate the coefficients either on a spherical
grid and return an SHGrid class instance, or for
a list of latitude and longitude coordinates.
plot_spectrum() : Plot the spectrum as a function of spherical
harmonic degree.
plot_cross_spectrum() : Plot the cross-spectrum of two functions.
plot_spectrum2d() : Plot the 2D spectrum of all spherical harmonic
degrees and orders.
plot_cross_spectrum2d() : Plot the 2D cross-spectrum of all spherical
harmonic degrees and orders.
to_array() : Return an array of spherical harmonic coefficients
with a different normalization convention.
to_file() : Save raw spherical harmonic coefficients as a file.
to_netcdf() : Save raw spherical harmonic coefficients as a
netcdf file.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the SHCoeffs
instance.
"""
def __init__(self):
"""Unused constructor of the super class."""
print('Initialize the class using one of the class methods:\n'
'>>> pyshtools.SHCoeffs.from_array\n'
'>>> pyshtools.SHCoeffs.from_random\n'
'>>> pyshtools.SHCoeffs.from_zeros\n'
'>>> pyshtools.SHCoeffs.from_file\n'
'>>> pyshtools.SHCoeffs.from_netcdf\n'
'>>> pyshtools.SHCoeffs.from_cap\n'
)
# ---- Factory methods ----
    @classmethod
    def from_zeros(self, lmax, kind='real', normalization='4pi', csphase=1):
        """
        Initialize class with spherical harmonic coefficients set to zero from
        degree 0 to lmax.

        Usage
        -----
        x = SHCoeffs.from_zeros(lmax, [normalization, csphase])

        Returns
        -------
        x : SHCoeffs class instance.

        Parameters
        ----------
        lmax : int
            The highest spherical harmonic degree l of the coefficients.
        normalization : str, optional, default = '4pi'
            '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
            orthonormalized, Schmidt semi-normalized, or unnormalized
            coefficients, respectively.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention: 1 to exclude the phase factor,
            or -1 to include it.
        kind : str, optional, default = 'real'
            'real' or 'complex' spherical harmonic coefficients.
        """
        # NOTE: the first parameter is named 'self' but, being a classmethod,
        # it actually receives the class object (used for subclass dispatch).
        # Validate arguments before allocating anything.
        if kind.lower() not in ('real', 'complex'):
            raise ValueError(
                "Kind must be 'real' or 'complex'. Input value is {:s}."
                .format(repr(kind))
                )
        if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value is {:s}."
                .format(repr(normalization))
                )
        if csphase != 1 and csphase != -1:
            raise ValueError(
                "csphase must be either 1 or -1. Input value is {:s}."
                .format(repr(csphase))
                )
        # Unnormalized coefficients are numerically unstable beyond degree 85,
        # so lmax is clamped with a warning rather than an error.
        if normalization.lower() == 'unnorm' and lmax > 85:
            _warnings.warn("Calculations using unnormalized coefficients " +
                           "are stable only for degrees less than or equal " +
                           "to 85. lmax for the coefficients will be set to " +
                           "85. Input value is {:d}.".format(lmax),
                           category=RuntimeWarning)
            lmax = 85
        nl = lmax + 1
        # Coefficient layout is (2, lmax+1, lmax+1): cosine/sine components.
        if kind.lower() == 'real':
            coeffs = _np.zeros((2, nl, nl))
        else:
            coeffs = _np.zeros((2, nl, nl), dtype=complex)
        # Dispatch to whichever subclass declares itself as the real/complex
        # implementation via its istype() classmethod.
        for cls in self.__subclasses__():
            if cls.istype(kind):
                return cls(coeffs, normalization=normalization.lower(),
                           csphase=csphase)
    @classmethod
    def from_array(self, coeffs, normalization='4pi', csphase=1, lmax=None,
                   copy=True):
        """
        Initialize the class with spherical harmonic coefficients from an input
        array.

        Usage
        -----
        x = SHCoeffs.from_array(array, [normalization, csphase, lmax, copy])

        Returns
        -------
        x : SHCoeffs class instance.

        Parameters
        ----------
        array : ndarray, shape (2, lmaxin+1, lmaxin+1).
            The input spherical harmonic coefficients.
        normalization : str, optional, default = '4pi'
            '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
            orthonormalized, Schmidt semi-normalized, or unnormalized
            coefficients, respectively.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention: 1 to exclude the phase factor,
            or -1 to include it.
        lmax : int, optional, default = None
            The maximum spherical harmonic degree to include in the returned
            class instance. This must be less than or equal to lmaxin.
        copy : bool, optional, default = True
            If True, make a copy of array when initializing the class instance.
            If False, initialize the class instance with a reference to array.
        """
        # The dtype of the input array decides which subclass is returned.
        if _np.iscomplexobj(coeffs):
            kind = 'complex'
        else:
            kind = 'real'
        if type(normalization) != str:
            raise ValueError('normalization must be a string. ' +
                             'Input type is {:s}.'
                             .format(str(type(normalization))))
        if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value is {:s}."
                .format(repr(normalization))
                )
        if csphase != 1 and csphase != -1:
            raise ValueError(
                "csphase must be either 1 or -1. Input value is {:s}."
                .format(repr(csphase))
                )
        # lmax may only truncate, never extend, the input array's bandwidth.
        lmaxin = coeffs.shape[1] - 1
        if lmax is None:
            lmax = lmaxin
        else:
            if lmax > lmaxin:
                lmax = lmaxin
        # Unnormalized coefficients are unstable beyond degree 85; clamp.
        if normalization.lower() == 'unnorm' and lmax > 85:
            _warnings.warn("Calculations using unnormalized coefficients " +
                           "are stable only for degrees less than or equal " +
                           "to 85. lmax for the coefficients will be set to " +
                           "85. Input value is {:d}.".format(lmax),
                           category=RuntimeWarning)
            lmax = 85
        # Dispatch to the real/complex subclass, truncating to lmax.
        for cls in self.__subclasses__():
            if cls.istype(kind):
                return cls(coeffs[:, 0:lmax+1, 0:lmax+1],
                           normalization=normalization.lower(),
                           csphase=csphase, copy=copy)
    @classmethod
    def from_random(self, power, lmax=None, kind='real', normalization='4pi',
                    csphase=1, exact_power=False, seed=None):
        """
        Initialize the class with spherical harmonic coefficients as random
        variables with a given spectrum.

        Usage
        -----
        x = SHCoeffs.from_random(power, [lmax, kind, normalization, csphase,
                                         exact_power, seed])

        Returns
        -------
        x : SHCoeffs class instance.

        Parameters
        ----------
        power : ndarray, shape (L+1)
            numpy array of shape (L+1) that specifies the expected power per
            degree l of the random coefficients, where L is the maximum
            spherical harmonic bandwidth.
        lmax : int, optional, default = len(power) - 1
            The maximum spherical harmonic degree l of the output coefficients.
            The coefficients will be set to zero for degrees greater than L.
        kind : str, optional, default = 'real'
            'real' or 'complex' spherical harmonic coefficients.
        normalization : str, optional, default = '4pi'
            '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
            orthonormalized, Schmidt semi-normalized, or unnormalized
            coefficients, respectively.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention: 1 to exclude the phase factor,
            or -1 to include it.
        exact_power : bool, optional, default = False
            The total variance of the coefficients is set exactly to the input
            power. The distribution of power at degree l amongst the angular
            orders is random, but the total power is fixed.
        seed : int, optional, default = None
            Set the seed for the numpy random number generator.

        Notes
        -----
        This routine returns a random realization of spherical harmonic
        coefficients obtained from a normal distribution. The variance of
        each coefficient at degree l is equal to the total power at degree
        l divided by the number of coefficients at that degree. The power
        spectrum of the random realization can be fixed exactly to the input
        spectrum by setting exact_power to True.
        """
        # check if all arguments are correct
        if type(normalization) != str:
            raise ValueError('normalization must be a string. ' +
                             'Input type is {:s}.'
                             .format(str(type(normalization))))
        if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
            raise ValueError(
                "The input normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Provided value is {:s}."
                .format(repr(normalization))
                )
        if csphase != 1 and csphase != -1:
            raise ValueError(
                "csphase must be 1 or -1. Input value is {:s}."
                .format(repr(csphase))
                )
        if kind.lower() not in ('real', 'complex'):
            raise ValueError(
                "kind must be 'real' or 'complex'. " +
                "Input value is {:s}.".format(repr(kind)))
        # nl is the number of degrees actually drawn from 'power'; if lmax
        # exceeds the length of 'power' the extra degrees stay zero (padded
        # below).
        if lmax is None:
            nl = len(power)
            lmax = nl - 1
        else:
            if lmax <= len(power) - 1:
                nl = lmax + 1
            else:
                nl = len(power)
        degrees = _np.arange(nl)
        if normalization.lower() == 'unnorm' and nl - 1 > 85:
            _warnings.warn("Calculations using unnormalized coefficients " +
                           "are stable only for degrees less than or equal " +
                           "to 85. lmax for the coefficients will be set to " +
                           "85. Input value is {:d}.".format(nl-1),
                           category=RuntimeWarning)
            nl = 85 + 1
            lmax = 85
        # Create coefficients with unit variance, which returns an expected
        # total power per degree of (2l+1) for 4pi normalized harmonics.
        if seed is not None:
            _np.random.seed(seed=seed)
        if kind.lower() == 'real':
            coeffs = _np.zeros((2, nl, nl))
            for l in degrees:
                coeffs[:2, l, :l+1] = _np.random.normal(size=(2, l+1))
        elif kind.lower() == 'complex':
            # - need to divide by sqrt 2 as there are two terms for each coeff.
            coeffs = _np.zeros((2, nl, nl), dtype=complex)
            for l in degrees:
                coeffs[:2, l, :l+1] = (_np.random.normal(size=(2, l+1)) +
                                       1j * _np.random.normal(size=(2, l+1))
                                       ) / _np.sqrt(2.)
        # Scale each degree so its power matches the requested spectrum:
        # either exactly (measured per-degree power) or in expectation
        # (dividing by the 2l+1 coefficients per degree).
        if exact_power:
            power_per_l = _spectrum(coeffs, normalization='4pi', unit='per_l')
            coeffs *= _np.sqrt(
                power[0:nl] / power_per_l)[_np.newaxis, :, _np.newaxis]
        else:
            coeffs *= _np.sqrt(
                power[0:nl] / (2 * degrees + 1))[_np.newaxis, :, _np.newaxis]
        # Coefficients were generated as '4pi'; convert to the target
        # normalization if it differs.
        if normalization.lower() == '4pi':
            pass
        elif normalization.lower() == 'ortho':
            coeffs = _convert(coeffs, normalization_in='4pi',
                              normalization_out='ortho')
        elif normalization.lower() == 'schmidt':
            coeffs = _convert(coeffs, normalization_in='4pi',
                              normalization_out='schmidt')
        elif normalization.lower() == 'unnorm':
            coeffs = _convert(coeffs, normalization_in='4pi',
                              normalization_out='unnorm')
        # Zero-pad degrees beyond the bandwidth of 'power' up to lmax.
        if lmax > nl - 1:
            coeffs = _np.pad(coeffs, ((0, 0), (0, lmax - nl + 1),
                             (0, lmax - nl + 1)), 'constant')
        for cls in self.__subclasses__():
            if cls.istype(kind):
                return cls(coeffs, normalization=normalization.lower(),
                           csphase=csphase)
    @classmethod
    def from_file(self, fname, lmax=None, format='shtools', kind='real',
                  normalization='4pi', skip=0, header=False,
                  csphase=1, **kwargs):
        """
        Initialize the class with spherical harmonic coefficients from a file.

        Usage
        -----
        x = SHCoeffs.from_file(filename, [format='shtools', lmax,
                                          normalization, csphase, skip,
                                          header])
        x = SHCoeffs.from_file(filename, [format='npy', normalization,
                                          csphase, **kwargs])

        Returns
        -------
        x : SHCoeffs class instance.

        Parameters
        ----------
        filename : str
            File name or URL containing the text-formatted spherical harmonic
            coefficients. filename will be treated as a URL if it starts with
            'http://', 'https://', or 'ftp://'.
        format : str, optional, default = 'shtools'
            'shtools' format or binary numpy 'npy' format.
        lmax : int, optional, default = None
            The maximum spherical harmonic degree to read from 'shtools'
            formatted files.
        normalization : str, optional, default = '4pi'
            '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
            orthonormalized, Schmidt semi-normalized, or unnormalized
            coefficients, respectively.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention: 1 to exclude the phase factor,
            or -1 to include it.
        skip : int, optional, default = 0
            Number of lines to skip at the beginning of the file when format is
            'shtools'.
        header : bool, optional, default = False
            If True, read a list of values from the header line of an 'shtools'
            formatted file.
        **kwargs : keyword argument list, optional for format = 'npy'
            Keyword arguments of numpy.load() when format is 'npy'.

        Notes
        -----
        If format='shtools', spherical harmonic coefficients will be read from
        a text file. The optional parameter `skip` specifies how many lines
        should be skipped before attempting to parse the file, the optional
        parameter `header` specifies whether to read a list of values from a
        header line, and the optional parameter `lmax` specifies the maximum
        degree to read from the file. All lines that do not start with 2
        integers and that are less than 3 words long will be treated as
        comments and ignored. For this format, each line of the file must
        contain

        l, m, coeffs[0, l, m], coeffs[1, l, m]

        where l and m are the spherical harmonic degree and order,
        respectively. The terms coeffs[1, l, 0] can be neglected as they are
        zero. For more information, see `shio.shread()`.

        If filename starts with http://, https://, or ftp://, the file will be
        treated as a URL. In this case, the file will be downloaded in its
        entirety before it is parsed.

        If format='npy', a binary numpy 'npy' file will be read using
        numpy.load().
        """
        if type(normalization) != str:
            raise ValueError('normalization must be a string. '
                             'Input type is {:s}.'
                             .format(str(type(normalization))))
        if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
            raise ValueError(
                "The input normalization must be '4pi', 'ortho', 'schmidt', "
                "or 'unnorm'. Provided value is {:s}."
                .format(repr(normalization))
                )
        if csphase != 1 and csphase != -1:
            raise ValueError(
                "csphase must be 1 or -1. Input value is {:s}."
                .format(repr(csphase))
                )
        # header_list stays None unless an 'shtools' header line is parsed;
        # it is forwarded to the subclass constructor either way.
        header_list = None
        if format.lower() == 'shtools':
            if header is True:
                coeffs, lmaxout, header_list = _shread(fname, lmax=lmax,
                                                       skip=skip, header=True)
            else:
                coeffs, lmaxout = _shread(fname, lmax=lmax, skip=skip)
        elif format.lower() == 'npy':
            # NOTE: the 'lmax' parameter is ignored for 'npy' files; the full
            # array is loaded and its bandwidth inferred from its shape.
            coeffs = _np.load(fname, **kwargs)
            lmaxout = coeffs.shape[1] - 1
        else:
            raise NotImplementedError(
                'format={:s} not implemented.'.format(repr(format)))
        # Unnormalized coefficients are unstable beyond degree 85; clamp.
        if normalization.lower() == 'unnorm' and lmaxout > 85:
            _warnings.warn("Calculations using unnormalized coefficients " +
                           "are stable only for degrees less than or equal " +
                           "to 85. lmax for the coefficients will be set to " +
                           "85. Input value is {:d}.".format(lmaxout),
                           category=RuntimeWarning)
            lmaxout = 85
        # The dtype of the loaded data overrides the 'kind' keyword.
        if _np.iscomplexobj(coeffs):
            kind = 'complex'
        else:
            kind = 'real'
        for cls in self.__subclasses__():
            if cls.istype(kind):
                return cls(coeffs, normalization=normalization.lower(),
                           csphase=csphase, header=header_list)
    @classmethod
    def from_cap(self, theta, lmax, clat=None, clon=None, normalization='4pi',
                 csphase=1, kind='real', degrees=True, copy=True):
        """
        Initialize the class with spherical harmonic coefficients of a
        spherical cap centered at the north pole.

        Usage
        -----
        x = SHCoeffs.from_cap(theta, lmax, [clat, clon, normalization, csphase,
                                            kind, degrees, copy])

        Returns
        -------
        x : SHCoeffs class instance.

        Parameters
        ----------
        theta : float
            The angular radius of the spherical cap, default in degrees.
        lmax : int
            The maximum spherical harmonic degree of the coefficients.
        clat, clon : float, optional, default = None
            Latitude and longitude of the center of the rotated spherical cap
            (default in degrees).
        normalization : str, optional, default = '4pi'
            '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
            orthonormalized, Schmidt semi-normalized, or unnormalized
            coefficients, respectively.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention: 1 to exclude the phase factor,
            or -1 to include it.
        kind : str, optional, default = 'real'
            'real' or 'complex' spherical harmonic coefficients.
        degrees : bool, optional = True
            If True, theta, clat, and clon are in degrees.
        copy : bool, optional, default = True
            If True, make a copy of array when initializing the class instance.
            If False, initialize the class instance with a reference to array.

        Notes
        -----
        The spherical harmonic coefficients are normalized such that the
        average value of the function is equal to 1. To rotate the cap to a
        specified latitude and longitude, specify the optional parameters clat
        and clon.
        """
        if type(normalization) != str:
            raise ValueError('normalization must be a string. ' +
                             'Input type is {:s}.'
                             .format(str(type(normalization))))
        if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
            raise ValueError(
                "The normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value is {:s}."
                .format(repr(normalization))
                )
        if csphase != 1 and csphase != -1:
            raise ValueError(
                "csphase must be either 1 or -1. Input value is {:s}."
                .format(repr(csphase))
                )
        if kind.lower() not in ('real', 'complex'):
            raise ValueError(
                "kind must be 'real' or 'complex'. " +
                "Input value is {:s}.".format(repr(kind)))
        # clat/clon must be supplied together: both are needed to rotate the
        # cap away from the north pole.
        if (clat is None and clon is not None) or \
                (clat is not None and clon is None):
            raise ValueError('clat and clon must both be input. ' +
                             'clat = {:s}, clon = {:s}.'
                             .format(repr(clat), repr(clon)))
        if degrees is True:
            theta = _np.deg2rad(theta)
        # A polar cap is zonal: only the m=0 (cosine) coefficients are set.
        cl = _shtools.SphericalCapCoef(theta, lmax)
        coeffs = _np.zeros((2, lmax+1, lmax+1))
        coeffs[0, 0:lmax+1, 0] = cl[0:lmax+1]
        coeffs = _convert(coeffs, normalization_in='4pi',
                          normalization_out=normalization,
                          csphase_in=1, csphase_out=csphase
                          )
        if kind == 'complex':
            coeffs = _shtools.SHrtoc(coeffs)
        for cls in self.__subclasses__():
            if cls.istype(kind):
                temp = cls(coeffs[:, 0:lmax+1, 0:lmax+1],
                           normalization=normalization.lower(),
                           csphase=csphase, copy=copy)
                # Rotate the north-polar cap to (clat, clon) if requested.
                if clat is not None and clon is not None:
                    if degrees is True:
                        temp = temp.rotate(0., -90 + clat, -clon, degrees=True)
                    else:
                        temp = temp.rotate(0., -_np.pi/2. + clat, -clon,
                                           degrees=False)
                return temp
@classmethod
def from_netcdf(self, filename, lmax=None, normalization='4pi', csphase=1):
"""
Initialize the class with spherical harmonic coefficients from a
netcdf file.
Usage
-----
x = SHCoeffs.from_netcdf(filename, [lmax, normalization, csphase])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
filename : str
Name of the file, including path.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read.
normalization : str, optional, default = '4pi'
Spherical harmonic normalization if not specified in the netcdf
file: '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi
normalized, orthonormalized, Schmidt semi-normalized, or
unnormalized coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention if not specified in the netcdf
file: 1 to exclude the phase factor, or -1 to include it.
Description
-----------
The format of the netcdf file has to be exactly as the format that is
used in SHCoeffs.to_netcdf().
"""
ds = _xr.open_dataset(filename)
try:
normalization = ds.coeffs.normalization
except:
pass
if type(normalization) != str:
raise ValueError('normalization must be a string. '
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The input normalization must be '4pi', 'ortho', "
"'schmidt', or 'unnorm'. Provided value was {:s}"
.format(repr(normalization))
)
try:
csphase = ds.coeffs.csphase
except:
pass
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value was {:s}"
.format(repr(csphase))
)
lmaxout = ds.dims['degree'] - 1
c = _np.tril(ds.coeffs.data)
s = _np.triu(ds.coeffs.data, k=1)
s = _np.vstack([s[-1], s[:-1]])
s = _np.transpose(s)
if isinstance(lmax, int):
c, s = c[:lmax+1, :lmax+1], s[:lmax+1, :lmax+1]
lmaxout = lmax
if normalization.lower() == 'unnorm' and lmaxout > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmaxout),
category=RuntimeWarning)
lmaxout = 85
c, s = c[:lmaxout+1, :lmaxout+1], s[:lmaxout+1, :lmaxout+1]
coeffs = _np.array([c, s])
if _np.iscomplexobj(coeffs):
kind = 'complex'
else:
kind = 'real'
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs, normalization=normalization.lower(),
csphase=csphase)
# ---- Define methods that modify internal variables ----
def set_coeffs(self, values, ls, ms):
"""
Set spherical harmonic coefficients in-place to specified values.
Usage
-----
x.set_coeffs(values, ls, ms)
Parameters
----------
values : float or complex (list)
The value(s) of the spherical harmonic coefficient(s).
ls : int (list)
The degree(s) of the coefficient(s) that should be set.
ms : int (list)
The order(s) of the coefficient(s) that should be set. Positive
and negative values correspond to the cosine and sine
components, respectively.
Examples
--------
x.set_coeffs(10., 1, 1) # x.coeffs[0, 1, 1] = 10.
x.set_coeffs(5., 1, -1) # x.coeffs[1, 1, 1] = 5.
x.set_coeffs([1., 2], [1, 2], [0, -2]) # x.coeffs[0, 1, 0] = 1.
# x.coeffs[1, 2, 2] = 2.
"""
# Ensure that the type is correct
values = _np.array(values)
ls = _np.array(ls)
ms = _np.array(ms)
mneg_mask = (ms < 0).astype(_np.int)
self.coeffs[mneg_mask, ls, _np.abs(ms)] = values
# ---- IO Routines
def to_file(self, filename, format='shtools', header=None, **kwargs):
"""
Save raw spherical harmonic coefficients to a file.
Usage
-----
x.to_file(filename, [format='shtools', header])
x.to_file(filename, [format='npy', **kwargs])
Parameters
----------
filename : str
Name of the output file.
format : str, optional, default = 'shtools'
'shtools' or 'npy'. See method from_file() for more information.
header : str, optional, default = None
A header string written to an 'shtools'-formatted file directly
before the spherical harmonic coefficients.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.save().
Notes
-----
If format='shtools', the coefficients will be written to an ascii
formatted file. The first line of the file is an optional user provided
header line, and the spherical harmonic coefficients are then listed,
with increasing degree and order, with the format
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively.
If format='npy', the spherical harmonic coefficients will be saved to
a binary numpy 'npy' file using numpy.save().
"""
if format == 'shtools':
with open(filename, mode='w') as file:
if header is not None:
file.write(header + '\n')
for l in range(self.lmax+1):
for m in range(l+1):
file.write('{:d}, {:d}, {:.16e}, {:.16e}\n'
.format(l, m, self.coeffs[0, l, m],
self.coeffs[1, l, m]))
elif format == 'npy':
_np.save(filename, self.coeffs, **kwargs)
else:
raise NotImplementedError(
'format={:s} not implemented.'.format(repr(format)))
    def to_netcdf(self, filename, title='', description='', lmax=None):
        """
        Return the coefficient data as a netcdf formatted file or object.

        Usage
        -----
        x.to_netcdf(filename, [title, description, lmax])

        Parameters
        ----------
        filename : str
            Name of the output file.
        title : str, optional, default = ''
            Title of the dataset.
        description : str, optional, default = ''
            Description of the data.
        lmax : int, optional, default = self.lmax
            The maximum spherical harmonic degree to output.

        Notes
        -----
        Both cosine and sine coefficients are packed into a single 2-D
        variable 'coeffs': cosine terms fill the lower triangle (including
        the diagonal) and sine terms the strict upper triangle, rolled up by
        one row. SHCoeffs.from_netcdf() inverts this packing exactly.
        """
        if lmax is None:
            lmax = self.lmax
        ds = _xr.Dataset()
        ds.coords['degree'] = ('degree', _np.arange(lmax+1))
        ds.coords['order'] = ('order', _np.arange(lmax+1))
        # c coeffs as lower triangular matrix
        c = self.coeffs[0, :lmax+1, :lmax+1]
        # s coeffs as upper triangular matrix
        s = _np.transpose(self.coeffs[1, :lmax+1, :lmax+1])
        # Roll the rows up by one so the sine coefficients occupy the strict
        # upper triangle; the sum c + s then stores both sets in one array.
        s = _np.vstack([s[1:], s[0]])
        ds['coeffs'] = (('degree', 'order'), c + s)
        ds['coeffs'].attrs['title'] = title
        ds['coeffs'].attrs['description'] = description
        # Record normalization/csphase so from_netcdf() can restore them.
        ds['coeffs'].attrs['normalization'] = self.normalization
        ds['coeffs'].attrs['csphase'] = self.csphase
        ds.to_netcdf(filename)
def to_array(self, normalization=None, csphase=None, lmax=None):
"""
Return spherical harmonic coefficients as a numpy array.
Usage
-----
coeffs = x.to_array([normalization, csphase, lmax])
Returns
-------
coeffs : ndarry, shape (2, lmax+1, lmax+1)
numpy ndarray of the spherical harmonic coefficients.
Parameters
----------
normalization : str, optional, default = x.normalization
Normalization of the output coefficients: '4pi', 'ortho',
'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized,
Schmidt semi-normalized, or unnormalized coefficients,
respectively.
csphase : int, optional, default = x.csphase
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree to output. If lmax is greater
than x.lmax, the array will be zero padded.
Notes
-----
This method will return an array of the spherical harmonic coefficients
using a different normalization and Condon-Shortley phase convention,
and a different maximum spherical harmonic degree. If the maximum
degree is smaller than the maximum degree of the class instance, the
coefficients will be truncated. Conversely, if this degree is larger
than the maximum degree of the class instance, the output array will be
zero padded.
"""
if normalization is None:
normalization = self.normalization
if csphase is None:
csphase = self.csphase
if lmax is None:
lmax = self.lmax
coeffs = _convert(self.coeffs, normalization_in=self.normalization,
normalization_out=normalization,
csphase_in=self.csphase, csphase_out=csphase,
lmax=lmax)
return coeffs
def copy(self):
"""
Return a deep copy of the class instance.
Usage
-----
copy = x.copy()
"""
return _copy.deepcopy(self)
def info(self):
"""
Print a summary of the data stored in the SHCoeffs instance.
Usage
-----
x.info()
"""
print(repr(self))
# ---- Mathematical operators ----
def __add__(self, other):
"""
Add two similar sets of coefficients or coefficients and a scalar:
self + other. For the addition of a scalar, only the degree 0
term is modified.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind and
self.lmax == other.lmax):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] +
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must have the '
'same kind, normalization, csphase and '
'lmax.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not add a complex constant to real '
'coefficients.')
coeffs = self.coeffs.copy()
coeffs[0, 0, 0] += other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented '
'for these operands.')
def __radd__(self, other):
"""
Add two similar sets of coefficients or coefficients and a scalar:
other + self. For the addition of a scalar, only the degree 0
term is modified.
"""
return self.__add__(other)
def __sub__(self, other):
"""
Subtract two similar sets of coefficients or coefficients and a scalar:
self - other. For the subtraction of a scalar, only the degree 0
term is modified.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind and
self.lmax == other.lmax):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] -
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must have the '
'same kind, normalization, csphase and '
'lmax.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not subtract a complex constant from '
'real coefficients.')
coeffs = self.coeffs.copy()
coeffs[0, 0, 0] -= other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented '
'for these operands.')
def __rsub__(self, other):
"""
Subtract two similar sets of coefficients or coefficients and a scalar:
other - self. For the subtraction from a scalar, self is multiplied by
-1 and then other is added to the degree 0 coefficient.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind and
self.lmax == other.lmax):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (other.coeffs[self.mask] -
self.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must have the '
'same kind, normalization, csphase and '
'lmax.')
elif _np.isscalar(other) is True:
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not subtract a complex constant from '
'real coefficients.')
coeffs = - self.coeffs.copy()
coeffs[0, 0, 0] += other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented '
'for these operands.')
def __mul__(self, other):
"""
Multiply two similar sets of coefficients or coefficients and a scalar:
self * other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind and
self.lmax == other.lmax):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] *
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must have the '
'same kind, normalization, csphase and '
'lmax.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not multiply real coefficients by '
'a complex constant.')
coeffs[self.mask] = self.coeffs[self.mask] * other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented '
'for these operands.')
def __rmul__(self, other):
"""
Multiply two similar sets of coefficients or coefficients and a scalar:
other * self.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""
Divide two similar sets of coefficients or coefficients and a scalar:
self / other.
"""
if isinstance(other, SHCoeffs):
if (self.normalization == other.normalization and self.csphase ==
other.csphase and self.kind == other.kind and
self.lmax == other.lmax):
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
coeffs[self.mask] = (self.coeffs[self.mask] /
other.coeffs[self.mask])
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise ValueError('The two sets of coefficients must have the '
'same kind, normalization, csphase and '
'lmax.')
elif _np.isscalar(other) is True:
coeffs = _np.empty([2, self.lmax+1, self.lmax+1],
dtype=self.coeffs.dtype)
if self.kind == 'real' and _np.iscomplexobj(other):
raise ValueError('Can not multiply real coefficients by '
'a complex constant.')
coeffs[self.mask] = self.coeffs[self.mask] / other
return SHCoeffs.from_array(coeffs, csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented '
'for these operands.')
def __pow__(self, other):
"""
Raise the spherical harmonic coefficients to a scalar power:
pow(self, other).
"""
if _np.isscalar(other) is True:
return SHCoeffs.from_array(pow(self.coeffs, other),
csphase=self.csphase,
normalization=self.normalization)
else:
raise NotImplementedError('Mathematical operator not implemented '
'for these operands.')
def __repr__(self):
return ('kind = {:s}\n'
'normalization = {:s}\n'
'csphase = {:d}\n'
'lmax = {:d}\n'
'header = {:s}'.format(
repr(self.kind), repr(self.normalization), self.csphase,
self.lmax, repr(self.header)))
# ---- Extract data ----
def degrees(self):
"""
Return a numpy array with the spherical harmonic degrees from 0 to
lmax.
Usage
-----
degrees = x.degrees()
Returns
-------
degrees : ndarray, shape (lmax+1)
1-D numpy ndarray listing the spherical harmonic degrees, where
lmax is the maximum spherical harmonic degree.
"""
return _np.arange(self.lmax + 1)
def spectrum(self, lmax=None, convention='power', unit='per_l', base=10.):
"""
Return the spectrum as a function of spherical harmonic degree.
Usage
-----
spectrum = x.spectrum([lmax, convention, unit, base])
Returns
-------
spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectrum, where lmax is the maximum
spherical harmonic degree.
Parameters
----------
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to output.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Notes
-----
This method returns either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. For
normalized coefficients ('4pi', 'ortho', or 'schmidt'), the l2-norm is
the sum of the magnitude of the coefficients squared.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitessimal logarithmic degree band. The contrubution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl) is equal to
spectrum(l, 'per_l')*l*log(a).
"""
return _spectrum(self.coeffs, normalization=self.normalization,
convention=convention, unit=unit, base=base,
lmax=lmax)
def cross_spectrum(self, clm, lmax=None, convention='power', unit='per_l',
base=10.):
"""
Return the cross-spectrum of two functions.
Usage
-----
cross_spectrum = x.cross_spectrum(clm, [lmax, convention, unit, base])
Returns
-------
cross_spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the cross-spectrum, where lmax is the maximum
spherical harmonic degree.
Parameters
----------
clm : SHCoeffs class instance.
The second function used in computing the cross-spectrum.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to output.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Notes
-----
This method returns either the cross-power spectrum, cross-energy
spectrum, or l2-cross-norm spectrum. Total cross-power is defined as
the integral of the function times the conjugate of clm over all space,
divided by the area the functions span. If the means of the functions
are zero, this is equivalent to the covariance of the two functions.
The total cross-energy is the integral of this function times the
conjugate of clm over all space and is 4pi times the total power. The
l2-cross-norm is the sum of this function times the conjugate of clm
over all angular orders as a function of spherical harmonic degree.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, and is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitessimal logarithmic degree band. The contrubution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl) is equal to
spectrum(l, 'per_l')*l*log(a).
"""
if not isinstance(clm, SHCoeffs):
raise ValueError('clm must be an SHCoeffs class instance. Input '
'type is {:s}.'.format(repr(type(clm))))
if lmax is None:
lmax = min(self.lmax, clm.lmax)
return _cross_spectrum(self.coeffs,
clm.to_array(normalization=self.normalization,
csphase=self.csphase,
lmax=lmax),
normalization=self.normalization,
convention=convention, unit=unit, base=base,
lmax=lmax)
def volume(self, lmax=None):
"""
If the function is the real shape of an object, calculate the volume
of the body.
Usage
-----
volume = x.volume([lmax])
Returns
-------
volume : float
The volume of the object.
Parameters
----------
lmax : int, optional, default = x.lmax
The maximum spherical harmonic degree to use when calculating the
volume.
Notes
-----
If the function is the real shape of an object, this method will
calculate the volume of the body exactly by integration. This routine
raises the function to the nth power, with n from 1 to 3, and
calculates the spherical harmonic degree and order 0 term. To avoid
aliases, the function is first expand on a grid that can resolve
spherical harmonic degrees up to 3*lmax.
"""
if self.coeffs[0, 0, 0] == 0:
raise ValueError('The volume of the object can not be calculated '
'when the degree and order 0 term is equal to '
'zero.')
if self.kind == 'complex':
raise ValueError('The volume of the object can not be calculated '
'for complex functions.')
if lmax is None:
lmax = self.lmax
r0 = self.coeffs[0, 0, 0]
grid = self.expand(lmax=min(3*lmax, 2800)) - r0
h200 = (grid**2).expand(lmax_calc=0).coeffs[0, 0, 0]
h300 = (grid**3).expand(lmax_calc=0).coeffs[0, 0, 0]
volume = 4 * _np.pi / 3 * (h300 + 3 * r0 * h200 + r0**3)
return volume
def centroid(self):
"""
Compute the centroid of the body in Cartesian coordinates.
Usage
-----
centroid = x.centroid()
Returns
-------
[x, y, z] : ndarray
The centroid of the object in meters.
Notes
-----
The centroid is computed as the center of mass of a homogeneous body.
The units of the input function must be in meters.
"""
from .shgravcoeffs import SHGravCoeffs as _SHGravCoeffs
from ..constant import G as _G
density = 1.
gm = density * _G.value * self.volume()
potential = _SHGravCoeffs.from_shape(self, density, gm)
return potential.center_of_mass
    # ---- Operations that return a new SHCoeffs class instance ----
def rotate(self, alpha, beta, gamma, degrees=True, convention='y',
body=False, dj_matrix=None):
"""
Rotate either the coordinate system used to express the spherical
harmonic coefficients or the physical body, and return a new class
instance.
Usage
-----
x_rotated = x.rotate(alpha, beta, gamma, [degrees, convention,
body, dj_matrix])
Returns
-------
x_rotated : SHCoeffs class instance
Parameters
----------
alpha, beta, gamma : float
The three Euler rotation angles in degrees.
degrees : bool, optional, default = True
True if the Euler angles are in degrees, False if they are in
radians.
convention : str, optional, default = 'y'
The convention used for the rotation of the second angle, which
can be either 'x' or 'y' for a rotation about the x or y axes,
respectively.
body : bool, optional, default = False
If true, rotate the physical body and not the coordinate system.
dj_matrix : ndarray, optional, default = None
The djpi2 rotation matrix computed by a call to djpi2.
Notes
-----
This method will take the spherical harmonic coefficients of a
function, rotate the coordinate frame by the three Euler anlges, and
output the spherical harmonic coefficients of the new function. If
the optional parameter body is set to True, then the physical body will
be rotated instead of the coordinate system.
The rotation of a coordinate system or body can be viewed in two
complementary ways involving three successive rotations. Both methods
have the same initial and final configurations, and the angles listed
in both schemes are the same.
Scheme A:
(I) Rotation about the z axis by alpha.
(II) Rotation about the new y axis by beta.
(III) Rotation about the new z axis by gamma.
Scheme B:
(I) Rotation about the z axis by gamma.
(II) Rotation about the initial y axis by beta.
(III) Rotation about the initial z axis by alpha.
Here, the 'y convention' is employed, where the second rotation is with
respect to the y axis. When using the 'x convention', the second
rotation is instead with respect to the x axis. The relation between
the Euler angles in the x and y conventions is given by
alpha_y=alpha_x-pi/2, beta_y=beta_x, and gamma_y=gamma_x+pi/2.
To perform the inverse transform associated with the three angles
(alpha, beta, gamma), one would perform an additional rotation using
the angles (-gamma, -beta, -alpha).
The rotations can be viewed either as a rotation of the coordinate
system or the physical body. To rotate the physical body without
rotation of the coordinate system, set the optional parameter body to
True. This rotation is accomplished by performing the inverse rotation
using the angles (-gamma, -beta, -alpha).
"""
if type(convention) != str:
raise ValueError('convention must be a string. Input type is {:s}.'
.format(str(type(convention))))
if convention.lower() not in ('x', 'y'):
raise ValueError(
"convention must be either 'x' or 'y'. " +
"Provided value is {:s}.".format(repr(convention))
)
if convention == 'y':
if body is True:
angles = _np.array([-gamma, -beta, -alpha])
else:
angles = _np.array([alpha, beta, gamma])
elif convention == 'x':
if body is True:
angles = _np.array([-gamma - _np.pi/2, -beta,
-alpha + _np.pi/2])
else:
angles = _np.array([alpha - _np.pi/2, beta, gamma + _np.pi/2])
if degrees:
angles = _np.radians(angles)
if self.lmax > 1200:
_warnings.warn("The rotate() method is accurate only to about" +
" spherical harmonic degree 1200. " +
"lmax = {:d}".format(self.lmax),
category=RuntimeWarning)
rot = self._rotate(angles, dj_matrix)
return rot
def convert(self, normalization=None, csphase=None, lmax=None, kind=None,
check=True):
"""
Return a SHCoeffs class instance with a different normalization
convention.
Usage
-----
clm = x.convert([normalization, csphase, lmax, kind, check])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
normalization : str, optional, default = x.normalization
Normalization of the output class: '4pi', 'ortho', 'schmidt', or
'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = x.csphase
Condon-Shortley phase convention for the output class: 1 to exclude
the phase factor, or -1 to include it.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree to output.
kind : str, optional, default = clm.kind
'real' or 'complex' spherical harmonic coefficients for the output
class.
check : bool, optional, default = True
When converting complex coefficients to real coefficients, if True,
check if function is entirely real.
Notes
-----
This method will return a new class instance of the spherical
harmonic coefficients using a different normalization and
Condon-Shortley phase convention. The coefficients can be converted
between real and complex form, and a different maximum spherical
harmonic degree of the output coefficients can be specified. If this
maximum degree is smaller than the maximum degree of the original
class, the coefficients will be truncated. Conversely, if this degree
is larger than the maximum degree of the original class, the
coefficients of the new class will be zero padded.
"""
if normalization is None:
normalization = self.normalization
if csphase is None:
csphase = self.csphase
if lmax is None:
lmax = self.lmax
if kind is None:
kind = self.kind
# check argument consistency
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type is {:s}.'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"normalization must be '4pi', 'ortho', 'schmidt', or " +
"'unnorm'. Provided value is {:s}."
.format(repr(normalization)))
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value is {:s}."
.format(repr(csphase)))
if (kind != self.kind):
if (kind == 'complex'):
temp = self._make_complex()
else:
temp = self._make_real(check=check)
coeffs = temp.to_array(normalization=normalization.lower(),
csphase=csphase, lmax=lmax)
else:
coeffs = self.to_array(normalization=normalization.lower(),
csphase=csphase, lmax=lmax)
return SHCoeffs.from_array(coeffs,
normalization=normalization.lower(),
csphase=csphase, copy=False)
def pad(self, lmax, copy=True):
"""
Return a SHCoeffs class where the coefficients are zero padded or
truncated to a different lmax.
Usage
-----
clm = x.pad(lmax, [copy])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
lmax : int
Maximum spherical harmonic degree to output.
copy : bool, optional, default = True
If True, make a copy of x when initializing the class instance.
If False, modify x itself.
"""
if copy:
clm = self.copy()
else:
clm = self
if lmax <= self.lmax:
clm.coeffs = clm.coeffs[:, :lmax+1, :lmax+1]
clm.mask = clm.mask[:, :lmax+1, :lmax+1]
else:
clm.coeffs = _np.pad(clm.coeffs, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=_np.bool)
for l in _np.arange(lmax + 1):
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
clm.mask = mask
clm.lmax = lmax
return clm
# ---- Expand the coefficients onto a grid ----
    def expand(self, grid='DH2', lat=None, colat=None, lon=None, degrees=True,
               zeros=None, lmax=None, lmax_calc=None, extend=True):
        """
        Evaluate the spherical harmonic coefficients either on a global grid
        or for a list of coordinates.
        Usage
        -----
        f = x.expand([grid, lmax, lmax_calc, zeros])
        g = x.expand(lat=lat, lon=lon, [lmax_calc, degrees])
        g = x.expand(colat=colat, lon=lon, [lmax_calc, degrees])
        Returns
        -------
        f : SHGrid class instance
        g : float, ndarray, or list
        Parameters
        ----------
        lat : int, float, ndarray, or list, optional, default = None
            Latitude coordinates where the function is to be evaluated.
        colat : int, float, ndarray, or list, optional, default = None
            Colatitude coordinates where the function is to be evaluated.
        lon : int, float, ndarray, or list, optional, default = None
            Longitude coordinates where the function is to be evaluated.
        degrees : bool, optional, default = True
            True if lat, colat and lon are in degrees, False if in radians.
        grid : str, optional, default = 'DH2'
            'DH' or 'DH1' for an equisampled lat/lon grid with nlat=nlon,
            'DH2' for an equidistant lat/lon grid with nlon=2*nlat, or 'GLQ'
            for a Gauss-Legendre quadrature grid.
        lmax : int, optional, default = x.lmax
            The maximum spherical harmonic degree, which determines the grid
            spacing of the output grid.
        lmax_calc : int, optional, default = x.lmax
            The maximum spherical harmonic degree to use when evaluating the
            function.
        extend : bool, optional, default = True
            If True, compute the longitudinal band for 360 E (DH and GLQ grids)
            and the latitudinal band for 90 S (DH grids only).
        zeros : ndarray, optional, default = None
            The cos(colatitude) nodes used in the Gauss-Legendre Quadrature
            grids.
        Notes
        -----
        This method either (1) evaluates the spherical harmonic coefficients on
        a global grid and returns an SHGrid class instance, or (2) evaluates
        the spherical harmonic coefficients for a list of (co)latitude and
        longitude coordinates. For the first case, the grid type is defined
        by the optional parameter grid, which can be 'DH', 'DH2' or 'GLQ'.For
        the second case, the optional parameters lon and either colat or lat
        must be provided.
        """
        # Dispatch: (1) lat/lon point evaluation, (2) colat/lon point
        # evaluation, or (3) expansion onto a global grid. NOTE(review):
        # if lat is given without lon (or vice versa), execution silently
        # falls through to the global-grid branch rather than raising.
        if lat is not None and colat is not None:
            raise ValueError('lat and colat can not both be specified.')
        if lat is not None and lon is not None:
            # Point evaluation at latitude/longitude coordinates.
            if lmax_calc is None:
                lmax_calc = self.lmax
            values = self._expand_coord(lat=lat, lon=lon, degrees=degrees,
                                        lmax_calc=lmax_calc)
            return values
        if colat is not None and lon is not None:
            # Point evaluation at colatitude/longitude coordinates:
            # convert colatitude to latitude and reuse the same path.
            if lmax_calc is None:
                lmax_calc = self.lmax
            if type(colat) is list:
                lat = list(map(lambda x: 90 - x, colat))
            else:
                lat = 90 - colat
            values = self._expand_coord(lat=lat, lon=lon, degrees=degrees,
                                        lmax_calc=lmax_calc)
            return values
        else:
            # Global grid expansion.
            if lmax is None:
                lmax = self.lmax
            if lmax_calc is None:
                lmax_calc = lmax
            if type(grid) != str:
                raise ValueError('grid must be a string. Input type is {:s}.'
                                 .format(str(type(grid))))
            # Grid type selection is case-insensitive.
            if grid.upper() in ('DH', 'DH1'):
                gridout = self._expandDH(sampling=1, lmax=lmax,
                                         lmax_calc=lmax_calc, extend=extend)
            elif grid.upper() == 'DH2':
                gridout = self._expandDH(sampling=2, lmax=lmax,
                                         lmax_calc=lmax_calc, extend=extend)
            elif grid.upper() == 'GLQ':
                gridout = self._expandGLQ(zeros=zeros, lmax=lmax,
                                          lmax_calc=lmax_calc, extend=extend)
            else:
                raise ValueError(
                    "grid must be 'DH', 'DH1', 'DH2', or 'GLQ'. " +
                    "Input value is {:s}.".format(repr(grid)))
            return gridout
# ---- Plotting routines ----
def plot_spectrum(self, convention='power', unit='per_l', base=10.,
lmax=None, xscale='lin', yscale='log', grid=True,
legend=None, axes_labelsize=None, tick_labelsize=None,
show=True, ax=None, fname=None, **kwargs):
"""
Plot the spectrum as a function of spherical harmonic degree.
Usage
-----
x.plot_spectrum([convention, unit, base, lmax, xscale, yscale, grid,
axes_labelsize, tick_labelsize, legend, show, ax,
fname, **kwargs])
Parameters
----------
convention : str, optional, default = 'power'
The type of spectrum to plot: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', plot the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', plot the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', plot the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum, and
the base to use for logarithmic axes.
lmax : int, optional, default = self.lmax
The maximum spherical harmonic degree to plot.
xscale : str, optional, default = 'lin'
Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'log'
Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
legend : str, optional, default = None
Text to use for the legend.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
show : bool, optional, default = True
If True, plot to the screen.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
**kwargs : keyword arguments, optional
Keyword arguments for pyplot.plot().
Notes
-----
This method plots either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. For
normalized coefficients ('4pi', 'ortho', or 'schmidt'), the l2-norm is
the sum of the magnitude of the coefficients squared.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitessimal logarithmic degree band. The contrubution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl) is equal to
spectrum(l, 'per_l')*l*log(a).
"""
if lmax is None:
lmax = self.lmax
spectrum = self.spectrum(convention=convention, unit=unit, base=base,
lmax=lmax)
ls = _np.arange(lmax + 1)
if ax is None:
fig, axes = _plt.subplots(1, 1)
else:
axes = ax
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams['axes.labelsize']
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams['xtick.labelsize']
axes.set_xlabel('Spherical harmonic degree', fontsize=axes_labelsize)
if convention == 'Energy':
axes.set_ylabel('Energy', fontsize=axes_labelsize)
if legend is None:
if (unit == 'per_l'):
legend = 'Energy per degree'
elif (unit == 'per_lm'):
legend = 'Energy per coefficient'
elif (unit == 'per_dlogl'):
legend = 'Energy per log bandwidth'
elif convention == 'l2norm':
axes.set_ylabel('l2 norm', fontsize=axes_labelsize)
if legend is None:
if (unit == 'per_l'):
legend = 'l2 norm per degree'
elif (unit == 'per_lm'):
legend = 'l2 norm per coefficient'
elif (unit == 'per_dlogl'):
legend = 'l2 norm per log bandwidth'
else:
axes.set_ylabel('Power', fontsize=axes_labelsize)
if legend is None:
if (unit == 'per_l'):
legend = 'Power per degree'
elif (unit == 'per_lm'):
legend = 'Power per coefficient'
elif (unit == 'per_dlogl'):
legend = 'Power per log bandwidth'
if xscale == 'log':
axes.set_xscale('log', basex=base)
if yscale == 'log':
axes.set_yscale('log', basey=base)
if xscale == 'log':
axes.plot(ls[1:lmax+1], spectrum[1:lmax+1], label=legend, **kwargs)
else:
axes.plot(ls[:lmax+1], spectrum[:lmax+1], label=legend, **kwargs)
axes.set(xlim=(ls[0], ls[lmax]))
axes.grid(grid, which='major')
axes.minorticks_on()
axes.tick_params(labelsize=tick_labelsize)
axes.legend()
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
def plot_cross_spectrum(self, clm, convention='power', unit='per_l',
base=10., lmax=None, xscale='lin', yscale='log',
grid=True, legend=None, axes_labelsize=None,
tick_labelsize=None, show=True, ax=None,
fname=None, **kwargs):
"""
Plot the cross-spectrum of two functions.
Usage
-----
x.plot_cross_spectrum(clm, [convention, unit, base, lmax, xscale,
yscale, grid, axes_labelsize,
tick_labelsize, legend, show, ax,
fname, **kwargs])
Parameters
----------
clm : SHCoeffs class instance.
The second function used in computing the cross-spectrum.
convention : str, optional, default = 'power'
The type of spectrum to plot: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', plot the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', plot the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', plot the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum, and
the base to use for logarithmic axes.
lmax : int, optional, default = self.lmax
The maximum spherical harmonic degree to plot.
xscale : str, optional, default = 'lin'
Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'log'
Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
legend : str, optional, default = None
Text to use for the legend.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
show : bool, optional, default = True
If True, plot to the screen.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
**kwargs : keyword arguments, optional
Keyword arguments for pyplot.plot().
Notes
-----
This method plots either the cross-power spectrum, cross-energy
spectrum, or l2-cross-norm spectrum. Total cross-power is defined as
the integral of the function times the conjugate of clm over all space,
divided by the area the functions span. If the means of the functions
are zero, this is equivalent to the covariance of the two functions.
The total cross-energy is the integral of this function times the
conjugate of clm over all space and is 4pi times the total power. The
l2-cross-norm is the sum of this function times the conjugate of clm
over all angular orders as a function of spherical harmonic degree.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, and is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitessimal logarithmic degree band. The contrubution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl) is equal to
spectrum(l, 'per_l')*l*log(a). If the input fields are complex, the
absolute value of the cross-spectrum will be plotted.
"""
if not isinstance(clm, SHCoeffs):
raise ValueError('clm must be an SHCoeffs class instance. Input '
'type is {:s}.'.format(repr(type(clm))))
if lmax is None:
lmax = min(self.lmax, clm.lmax)
spectrum = self.cross_spectrum(clm, convention=convention, unit=unit,
base=base, lmax=lmax)
spectrum = abs(spectrum)
ls = _np.arange(lmax + 1)
if ax is None:
fig, axes = _plt.subplots(1, 1)
else:
axes = ax
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams['axes.labelsize']
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams['xtick.labelsize']
axes.set_xlabel('Spherical harmonic degree', fontsize=axes_labelsize)
if convention == 'Energy':
axes.set_ylabel('Energy', fontsize=axes_labelsize)
if legend is None:
if (unit == 'per_l'):
legend = 'Energy per degree'
elif (unit == 'per_lm'):
legend = 'Energy per coefficient'
elif (unit == 'per_dlogl'):
legend = 'Energy per log bandwidth'
elif convention == 'l2norm':
axes.set_ylabel('l2 norm', fontsize=axes_labelsize)
if legend is None:
if (unit == 'per_l'):
legend = 'l2 norm per degree'
elif (unit == 'per_lm'):
legend = 'l2 norm per coefficient'
elif (unit == 'per_dlogl'):
legend = 'l2 norm per log bandwidth'
else:
axes.set_ylabel('Power', fontsize=axes_labelsize)
if legend is None:
if (unit == 'per_l'):
legend = 'Power per degree'
elif (unit == 'per_lm'):
legend = 'Power per coefficient'
elif (unit == 'per_dlogl'):
legend = 'Power per log bandwidth'
if xscale == 'log':
axes.set_xscale('log', basex=base)
if yscale == 'log':
axes.set_yscale('log', basey=base)
if xscale == 'log':
axes.plot(ls[1:lmax+1], spectrum[1:lmax+1], label=legend, **kwargs)
else:
axes.plot(ls[:lmax+1], spectrum[:lmax+1], label=legend, **kwargs)
axes.set(xlim=(ls[0], ls[lmax]))
axes.grid(grid, which='major')
axes.minorticks_on()
axes.tick_params(labelsize=tick_labelsize)
axes.legend()
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
def plot_spectrum2d(self, convention='power', xscale='lin', yscale='lin',
grid=True, axes_labelsize=None, tick_labelsize=None,
vscale='log', vrange=None, vmin=None, vmax=None,
lmax=None, show=True, ax=None, fname=None):
"""
Plot the spectrum as a function of spherical harmonic degree and order.
Usage
-----
x.plot_spectrum2d([convention, xscale, yscale, grid, axes_labelsize,
tick_labelsize, vscale, vrange, vmin, vmax, lmax,
show, ax, fname])
Parameters
----------
convention : str, optional, default = 'power'
The type of spectrum to plot: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
xscale : str, optional, default = 'lin'
Scale of the l axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'lin'
Scale of the m axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
vscale : str, optional, default = 'log'
Scale of the color axis: 'lin' for linear or 'log' for logarithmic.
vrange : (float, float), optional, default = None
Colormap range (min, max) relative to the maximum value. If None,
scale the image to the maximum and minimum values.
vmin : float, optional, default=None
The minmum range of the colormap. If None, the minimum value of the
spectrum will be used.
vmax : float, optional, default=None
The maximum range of the colormap. If None, the maximum value of
the spectrum will be used.
lmax : int, optional, default = self.lmax
The maximum spherical harmonic degree to plot.
show : bool, optional, default = True
If True, plot to the screen.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
Notes
-----
This method plots either the power, energy, or l2-norm for each
spherical harmonic degree and order of the function. Total power is
defined as the integral of the function squared over all space,
divided by the area the function spans. If the mean of the function is
zero, this is equivalent to the variance of the function. The total
energy is the integral of the function squared over all space and is
4pi times the total power. For normalized coefficients ('4pi',
'ortho', or 'schmidt'), the l2-norm is the sum of the magnitude of the
coefficients squared.
"""
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams['axes.labelsize']
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams['xtick.labelsize']
if lmax is None:
lmax = self.lmax
degrees = _np.arange(lmax + 1)
# Create the matrix of the spectrum for each coefficient
spectrum = _np.empty((lmax + 1, 2 * lmax + 1))
mpositive = self.coeffs[0, :lmax + 1, :lmax + 1] * \
self.coeffs[0, :lmax + 1, :lmax + 1].conj()
mpositive[~self.mask[0, :lmax + 1, :lmax + 1]] = _np.nan
mnegative = self.coeffs[1, :lmax + 1, :lmax + 1] * \
self.coeffs[1, :lmax + 1, :lmax + 1].conj()
mnegative[~self.mask[1, :lmax + 1, :lmax + 1]] = _np.nan
spectrum[:, :lmax] = _np.fliplr(mnegative)[:, :lmax]
spectrum[:, lmax:] = mpositive
if (convention.lower() == 'l2norm'):
if self.normalization == 'unnorm':
raise ValueError("convention can not be set to 'l2norm' " +
"when using unnormalized harmonics.")
else:
pass
elif convention.lower() in ('power', 'energy'):
if self.normalization == '4pi':
pass
elif self.normalization == 'schmidt':
for l in degrees:
spectrum[l, :] /= (2. * l + 1.)
elif self.normalization == 'ortho':
for l in degrees:
spectrum[l, :] /= (4. * _np.pi)
elif self.normalization == 'unnorm':
for l in degrees:
ms = _np.arange(l+1)
conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
if self.kind == 'real':
conv[1:l + 1] = conv[1:l + 1] / 2.
spectrum[l, lmax-l:lmax] *= conv[::-1][0:l]
spectrum[l, lmax:lmax+l+1] *= conv[0:l+1]
else:
raise ValueError(
"normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value is {:s}."
.format(repr(self.normalization)))
else:
raise ValueError(
"convention must be 'power', 'energy', or 'l2norm'. " +
"Input value is {:s}.".format(repr(convention)))
if convention == 'energy':
spectrum *= 4.0 * _np.pi
spectrum_masked = _np.ma.masked_invalid(spectrum)
# need to add one extra value to each in order for pcolormesh
# to plot the last row and column.
ls = _np.arange(lmax+2).astype(_np.float)
ms = _np.arange(-lmax, lmax + 2, dtype=_np.float)
lgrid, mgrid = _np.meshgrid(ls, ms, indexing='ij')
lgrid -= 0.5
mgrid -= 0.5
if ax is None:
fig, axes = _plt.subplots()
else:
axes = ax
if vrange is not None:
vmin = _np.nanmax(spectrum) * vrange[0]
vmax = _np.nanmax(spectrum) * vrange[1]
else:
if vmin is None:
_temp = spectrum
_temp[_temp == 0] = _np.NaN
vmin = _np.nanmin(_temp)
if vmax is None:
vmax = _np.nanmax(spectrum)
if vscale.lower() == 'log':
norm = _mpl.colors.LogNorm(vmin, vmax, clip=True)
# Clipping is required to avoid an invalid value error
elif vscale.lower() == 'lin':
norm = _plt.Normalize(vmin, vmax)
else:
raise ValueError(
"vscale must be 'lin' or 'log'. " +
"Input value is {:s}.".format(repr(vscale)))
if (xscale == 'lin'):
cmesh = axes.pcolormesh(lgrid, mgrid, spectrum_masked,
norm=norm, cmap='viridis')
axes.set(xlim=(-0.5, lmax + 0.5))
elif (xscale == 'log'):
cmesh = axes.pcolormesh(lgrid[1:], mgrid[1:], spectrum_masked[1:],
norm=norm, cmap='viridis')
axes.set(xscale='log', xlim=(1., lmax + 0.5))
else:
raise ValueError(
"xscale must be 'lin' or 'log'. " +
"Input value is {:s}.".format(repr(xscale)))
if (yscale == 'lin'):
axes.set(ylim=(-lmax - 0.5, lmax + 0.5))
elif (yscale == 'log'):
axes.set(yscale='symlog', ylim=(-lmax - 0.5, lmax + 0.5))
else:
raise ValueError(
"yscale must be 'lin' or 'log'. " +
"Input value is {:s}.".format(repr(yscale)))
cb = _plt.colorbar(cmesh, ax=ax)
if (convention == 'energy'):
cb.set_label('Energy per coefficient', fontsize=axes_labelsize)
elif (convention == 'power'):
cb.set_label('Power per coefficient', fontsize=axes_labelsize)
else:
cb.set_label('Magnitude-squared coefficient',
fontsize=axes_labelsize)
cb.ax.tick_params(labelsize=tick_labelsize)
axes.set_xlabel('Spherical harmonic degree', fontsize=axes_labelsize)
axes.set_ylabel('Spherical harmonic order', fontsize=axes_labelsize)
axes.tick_params(labelsize=tick_labelsize)
axes.minorticks_on()
axes.grid(grid, which='major')
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
def plot_cross_spectrum2d(self, clm, convention='power', xscale='lin',
                          yscale='lin', grid=True, axes_labelsize=None,
                          tick_labelsize=None, vscale='log', vrange=None,
                          vmin=None, vmax=None, lmax=None, show=True,
                          ax=None, fname=None):
    """
    Plot the cross-spectrum of two functions as a function of spherical
    harmonic degree and order.

    Usage
    -----
    x.plot_cross_spectrum2d(clm, [convention, xscale, yscale, grid,
                                  axes_labelsize, tick_labelsize, vscale,
                                  vrange, vmin, vmax, lmax, show, ax,
                                  fname])

    Parameters
    ----------
    clm : SHCoeffs class instance.
        The second function used in computing the cross-spectrum.
    convention : str, optional, default = 'power'
        The type of spectrum to plot: 'power' for power spectrum,
        'energy' for energy spectrum, and 'l2norm' for the l2 norm
        spectrum.
    xscale : str, optional, default = 'lin'
        Scale of the l axis: 'lin' for linear or 'log' for logarithmic.
    yscale : str, optional, default = 'lin'
        Scale of the m axis: 'lin' for linear or 'log' for logarithmic.
    grid : bool, optional, default = True
        If True, plot grid lines.
    axes_labelsize : int, optional, default = None
        The font size for the x and y axes labels.
    tick_labelsize : int, optional, default = None
        The font size for the x and y tick labels.
    vscale : str, optional, default = 'log'
        Scale of the color axis: 'lin' for linear or 'log' for logarithmic.
    vrange : (float, float), optional, default = None
        Colormap range (min, max) relative to the maximum value. If None,
        scale the image to the maximum and minimum values.
    vmin : float, optional, default=None
        The minmum range of the colormap. If None, the minimum value of the
        spectrum will be used.
    vmax : float, optional, default=None
        The maximum range of the colormap. If None, the maximum value of
        the spectrum will be used.
    lmax : int, optional, default = self.lmax
        The maximum spherical harmonic degree to plot.
    show : bool, optional, default = True
        If True, plot to the screen.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.

    Notes
    -----
    This method plots either the power, energy, or l2-norm for each
    spherical harmonic degree and order of the function. Total power is
    defined as the integral of the function squared over all space,
    divided by the area the function spans. If the mean of the function is
    zero, this is equivalent to the variance of the function. The total
    energy is the integral of the function squared over all space and is
    4pi times the total power. For normalized coefficients ('4pi',
    'ortho', or 'schmidt'), the l2-norm is the sum of the magnitude of the
    coefficients squared. If the input fields are complex, the absolute
    value of the cross-spectrum will be plotted.
    """
    if not isinstance(clm, SHCoeffs):
        raise ValueError('clm must be an SHCoeffs class instance. Input '
                         'type is {:s}.'.format(repr(type(clm))))

    if axes_labelsize is None:
        axes_labelsize = _mpl.rcParams['axes.labelsize']
    if tick_labelsize is None:
        tick_labelsize = _mpl.rcParams['xtick.labelsize']

    if lmax is None:
        lmax = min(self.lmax, clm.lmax)
    degrees = _np.arange(lmax + 1)

    # Convert clm to the same normalization/phase convention as self so
    # the product of coefficients is meaningful.
    coeffs = clm.to_array(normalization=self.normalization,
                          csphase=self.csphase,
                          lmax=self.lmax)

    # Create the matrix of the spectrum for each coefficient. Columns
    # [0:lmax] hold the negative orders (reversed), [lmax:] the positive.
    spectrum = _np.empty((lmax + 1, 2 * lmax + 1))
    mpositive = _np.abs(self.coeffs[0, :lmax + 1, :lmax + 1] *
                        coeffs[0, :lmax + 1, :lmax + 1].conj())
    mpositive[~self.mask[0, :lmax + 1, :lmax + 1]] = _np.nan
    mnegative = _np.abs(self.coeffs[1, :lmax + 1, :lmax + 1] *
                        coeffs[1, :lmax + 1, :lmax + 1].conj())
    mnegative[~self.mask[1, :lmax + 1, :lmax + 1]] = _np.nan
    spectrum[:, :lmax] = _np.fliplr(mnegative)[:, :lmax]
    spectrum[:, lmax:] = mpositive

    if (convention.lower() == 'l2norm'):
        if self.normalization == 'unnorm':
            raise ValueError("convention can not be set to 'l2norm' " +
                             "when using unnormalized harmonics.")
        else:
            pass
    elif convention.lower() in ('power', 'energy'):
        # Convert the l2-norm values above to power per coefficient.
        if self.normalization == '4pi':
            pass
        elif self.normalization == 'schmidt':
            for l in degrees:
                spectrum[l, :] /= (2. * l + 1.)
        elif self.normalization == 'ortho':
            for l in degrees:
                spectrum[l, :] /= (4. * _np.pi)
        elif self.normalization == 'unnorm':
            for l in degrees:
                morder = _np.arange(l + 1)
                conv = _factorial(l + morder) / (2. * l + 1.) / \
                    _factorial(l - morder)
                if self.kind == 'real':
                    conv[1:l + 1] = conv[1:l + 1] / 2.
                spectrum[l, lmax - l:lmax] *= conv[::-1][0:l]
                spectrum[l, lmax:lmax + l + 1] *= conv[0:l + 1]
        else:
            raise ValueError(
                "normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value is {:s}."
                .format(repr(self.normalization)))
    else:
        raise ValueError(
            "convention must be 'power', 'energy', or 'l2norm'. " +
            "Input value is {:s}.".format(repr(convention)))

    if convention == 'energy':
        spectrum *= 4.0 * _np.pi

    # masked_invalid copies its input by default, so the in-place edits
    # of `spectrum` below do not affect the plotted array.
    spectrum_masked = _np.ma.masked_invalid(spectrum)

    # need to add one extra value to each in order for pcolormesh
    # to plot the last row and column.
    # The deprecated np.float alias was removed in NumPy 1.24; the
    # builtin float (== np.float64 for these calls) is the replacement.
    ls = _np.arange(lmax + 2).astype(float)
    ms = _np.arange(-lmax, lmax + 2, dtype=float)
    lgrid, mgrid = _np.meshgrid(ls, ms, indexing='ij')
    lgrid -= 0.5
    mgrid -= 0.5

    if ax is None:
        fig, axes = _plt.subplots()
    else:
        axes = ax

    if vrange is not None:
        vmin = _np.nanmax(spectrum) * vrange[0]
        vmax = _np.nanmax(spectrum) * vrange[1]
    else:
        if vmin is None:
            # NOTE(review): _temp aliases spectrum, so zeros are turned
            # into NaN in place before taking the minimum. np.NaN was
            # removed in NumPy 2.0; np.nan is the canonical spelling.
            _temp = spectrum
            _temp[_temp == 0] = _np.nan
            vmin = _np.nanmin(_temp)
        if vmax is None:
            vmax = _np.nanmax(spectrum)

    if vscale.lower() == 'log':
        norm = _mpl.colors.LogNorm(vmin, vmax, clip=True)
        # Clipping is required to avoid an invalid value error
    elif vscale.lower() == 'lin':
        norm = _plt.Normalize(vmin, vmax)
    else:
        raise ValueError(
            "vscale must be 'lin' or 'log'. " +
            "Input value is {:s}.".format(repr(vscale)))

    if (xscale == 'lin'):
        cmesh = axes.pcolormesh(lgrid, mgrid, spectrum_masked,
                                norm=norm, cmap='viridis')
        axes.set(xlim=(-0.5, lmax + 0.5))
    elif (xscale == 'log'):
        cmesh = axes.pcolormesh(lgrid[1:], mgrid[1:], spectrum_masked[1:],
                                norm=norm, cmap='viridis')
        axes.set(xscale='log', xlim=(1., lmax + 0.5))
    else:
        raise ValueError(
            "xscale must be 'lin' or 'log'. " +
            "Input value is {:s}.".format(repr(xscale)))

    if (yscale == 'lin'):
        axes.set(ylim=(-lmax - 0.5, lmax + 0.5))
    elif (yscale == 'log'):
        axes.set(yscale='symlog', ylim=(-lmax - 0.5, lmax + 0.5))
    else:
        raise ValueError(
            "yscale must be 'lin' or 'log'. " +
            "Input value is {:s}.".format(repr(yscale)))

    cb = _plt.colorbar(cmesh, ax=ax)

    if (convention == 'energy'):
        cb.set_label('Energy per coefficient', fontsize=axes_labelsize)
    elif (convention == 'power'):
        cb.set_label('Power per coefficient', fontsize=axes_labelsize)
    else:
        cb.set_label('Magnitude-squared coefficient',
                     fontsize=axes_labelsize)

    cb.ax.tick_params(labelsize=tick_labelsize)
    axes.set_xlabel('Spherical harmonic degree', fontsize=axes_labelsize)
    axes.set_ylabel('Spherical harmonic order', fontsize=axes_labelsize)
    axes.tick_params(labelsize=tick_labelsize)
    axes.minorticks_on()
    axes.grid(grid, which='major')

    # NOTE(review): when an external ax is supplied, no figure handle is
    # created here, so nothing is returned — confirm this matches the
    # callers' expectations (the figure belongs to the caller).
    if ax is None:
        fig.tight_layout(pad=0.5)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
# ================== REAL SPHERICAL HARMONICS ================
class SHRealCoeffs(SHCoeffs):
    """Real Spherical Harmonics Coefficient class."""

    @staticmethod
    def istype(kind):
        """Test if class is Real or Complex."""
        return kind == 'real'

    def __init__(self, coeffs, normalization='4pi', csphase=1, copy=True,
                 header=None):
        """Initialize Real SH Coefficients.

        Parameters
        ----------
        coeffs : ndarray, shape (2, lmax+1, lmax+1)
            Array of cosine (index 0) and sine (index 1) coefficients.
        normalization : str, optional, default = '4pi'
            Normalization of the coefficients.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention.
        copy : bool, optional, default = True
            If True, store a copy of coeffs; otherwise keep a reference.
        header : optional, default = None
            Header information read from a file, if any.
        """
        lmax = coeffs.shape[1] - 1
        # ---- create mask to filter out m<=l ----
        # The deprecated np.bool alias was removed in NumPy 1.24;
        # the builtin bool is the exact replacement.
        mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
        mask[0, 0, 0] = True
        for l in _np.arange(lmax + 1):
            mask[:, l, :l + 1] = True
        # The negative-order (sine) coefficients have no m = 0 term.
        mask[1, :, 0] = False
        self.mask = mask
        self.lmax = lmax
        self.kind = 'real'
        self.normalization = normalization
        self.csphase = csphase
        self.header = header

        if copy:
            self.coeffs = _np.copy(coeffs)
            self.coeffs[~mask] = 0.
        else:
            self.coeffs = coeffs

    def _norm_flag(self):
        """Return the integer normalization flag expected by the shtools
        routines (1='4pi', 2='schmidt', 3='unnorm', 4='ortho'), raising
        ValueError for an unrecognized normalization."""
        if self.normalization == '4pi':
            return 1
        elif self.normalization == 'schmidt':
            return 2
        elif self.normalization == 'unnorm':
            return 3
        elif self.normalization == 'ortho':
            return 4
        else:
            raise ValueError(
                "Normalization must be '4pi', 'ortho', 'schmidt', or " +
                "'unnorm'. Input value is {:s}."
                .format(repr(self.normalization)))

    def _make_complex(self):
        """Convert the real SHCoeffs class to the complex class."""
        rcomplex_coeffs = _shtools.SHrtoc(self.coeffs,
                                          convention=1, switchcs=0)

        # These coefficients are using real floats, and need to be
        # converted to complex form.
        complex_coeffs = _np.zeros((2, self.lmax + 1, self.lmax + 1),
                                   dtype='complex')
        complex_coeffs[0, :, :] = (rcomplex_coeffs[0, :, :] + 1j *
                                   rcomplex_coeffs[1, :, :])
        complex_coeffs[1, :, :] = complex_coeffs[0, :, :].conjugate()
        for m in self.degrees():
            if m % 2 == 1:
                complex_coeffs[1, :, m] = - complex_coeffs[1, :, m]

        # complex_coeffs is initialized in this function and can be
        # passed as reference
        return SHCoeffs.from_array(complex_coeffs,
                                   normalization=self.normalization,
                                   csphase=self.csphase, copy=False)

    def _rotate(self, angles, dj_matrix):
        """Rotate the coefficients by the Euler angles alpha, beta, gamma."""
        if dj_matrix is None:
            dj_matrix = _shtools.djpi2(self.lmax + 1)

        # The coefficients need to be 4pi normalized with csphase = 1
        coeffs = _shtools.SHRotateRealCoef(
            self.to_array(normalization='4pi', csphase=1), angles, dj_matrix)

        # Convert 4pi normalized coefficients to the same normalization
        # as the unrotated coefficients.
        if self.normalization != '4pi' or self.csphase != 1:
            temp = _convert(coeffs, normalization_in='4pi', csphase_in=1,
                            normalization_out=self.normalization,
                            csphase_out=self.csphase)
            return SHCoeffs.from_array(
                temp, normalization=self.normalization,
                csphase=self.csphase, copy=False)
        else:
            return SHCoeffs.from_array(coeffs, copy=False)

    def _expandDH(self, sampling, lmax, lmax_calc, extend):
        """Evaluate the coefficients on a Driscoll and Healy (1994) grid."""
        norm = self._norm_flag()
        data = _shtools.MakeGridDH(self.coeffs, sampling=sampling, norm=norm,
                                   csphase=self.csphase, lmax=lmax,
                                   lmax_calc=lmax_calc, extend=extend)
        gridout = SHGrid.from_array(data, grid='DH', copy=False)
        return gridout

    def _expandGLQ(self, zeros, lmax, lmax_calc, extend):
        """Evaluate the coefficients on a Gauss Legendre quadrature grid."""
        norm = self._norm_flag()
        if zeros is None:
            zeros, weights = _shtools.SHGLQ(self.lmax)
        data = _shtools.MakeGridGLQ(self.coeffs, zeros, norm=norm,
                                    csphase=self.csphase, lmax=lmax,
                                    lmax_calc=lmax_calc, extend=extend)
        gridout = SHGrid.from_array(data, grid='GLQ', copy=False)
        return gridout

    def _expand_coord(self, lat, lon, lmax_calc, degrees):
        """Evaluate the function at the coordinates lat and lon."""
        norm = self._norm_flag()

        if degrees is True:
            latin = lat
            lonin = lon
        else:
            latin = _np.rad2deg(lat)
            lonin = _np.rad2deg(lon)

        if type(lat) is not type(lon):
            raise ValueError('lat and lon must be of the same type. ' +
                             'Input types are {:s} and {:s}.'
                             .format(repr(type(lat)), repr(type(lon))))

        # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
        if type(lat) is int or type(lat) is float or \
                type(lat) is _np.float64:
            return _shtools.MakeGridPoint(self.coeffs, lat=latin, lon=lonin,
                                          lmax=lmax_calc, norm=norm,
                                          csphase=self.csphase)
        elif type(lat) is _np.ndarray:
            values = _np.empty_like(lat, dtype=float)
            for v, latitude, longitude in _np.nditer([values, latin, lonin],
                                                     op_flags=['readwrite']):
                v[...] = _shtools.MakeGridPoint(self.coeffs, lat=latitude,
                                                lon=longitude,
                                                lmax=lmax_calc, norm=norm,
                                                csphase=self.csphase)
            return values
        elif type(lat) is list:
            values = []
            for latitude, longitude in zip(latin, lonin):
                values.append(
                    _shtools.MakeGridPoint(self.coeffs, lat=latitude,
                                           lon=longitude,
                                           lmax=lmax_calc, norm=norm,
                                           csphase=self.csphase))
            return values
        else:
            raise ValueError('lat and lon must be either an int, float, '
                             'ndarray, or list. Input types are {:s} and {:s}.'
                             .format(repr(type(lat)), repr(type(lon))))
# =============== COMPLEX SPHERICAL HARMONICS ================
class SHComplexCoeffs(SHCoeffs):
    """Complex Spherical Harmonics Coefficients class."""

    @staticmethod
    def istype(kind):
        """Check if class has kind 'real' or 'complex'."""
        return kind == 'complex'

    def __init__(self, coeffs, normalization='4pi', csphase=1, copy=True,
                 header=None):
        """Initialize Complex coefficients.

        Parameters
        ----------
        coeffs : ndarray, shape (2, lmax+1, lmax+1)
            Array of positive (index 0) and negative (index 1) order
            coefficients.
        normalization : str, optional, default = '4pi'
            Normalization of the coefficients.
        csphase : int, optional, default = 1
            Condon-Shortley phase convention.
        copy : bool, optional, default = True
            If True, store a copy of coeffs; otherwise keep a reference.
        header : optional, default = None
            Header information read from a file, if any.
        """
        lmax = coeffs.shape[1] - 1
        # ---- create mask to filter out m<=l ----
        # The deprecated np.bool alias was removed in NumPy 1.24;
        # the builtin bool is the exact replacement.
        mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
        mask[0, 0, 0] = True
        for l in _np.arange(lmax + 1):
            mask[:, l, :l + 1] = True
        # The negative-order coefficients have no m = 0 term.
        mask[1, :, 0] = False

        self.mask = mask
        self.lmax = lmax
        self.kind = 'complex'
        self.normalization = normalization
        self.csphase = csphase
        self.header = header

        if copy:
            self.coeffs = _np.copy(coeffs)
            self.coeffs[~mask] = 0.
        else:
            self.coeffs = coeffs

    def _norm_flag(self):
        """Return the integer normalization flag expected by the shtools
        routines (1='4pi', 2='schmidt', 3='unnorm', 4='ortho'), raising
        ValueError for an unrecognized normalization."""
        if self.normalization == '4pi':
            return 1
        elif self.normalization == 'schmidt':
            return 2
        elif self.normalization == 'unnorm':
            return 3
        elif self.normalization == 'ortho':
            return 4
        else:
            raise ValueError(
                "Normalization must be '4pi', 'ortho', 'schmidt', or " +
                "'unnorm'. Input value is {:s}."
                .format(repr(self.normalization)))

    def _make_real(self, check=True):
        """Convert the complex SHCoeffs class to the real class."""
        # Test if the coefficients correspond to a real grid.
        # This is not very elegant, and the equality condition
        # is probably not robust to round off errors.
        if check:
            for l in self.degrees():
                if self.coeffs[0, l, 0] != self.coeffs[0, l, 0].conjugate():
                    raise RuntimeError('Complex coefficients do not '
                                       'correspond to a real field. '
                                       'l = {:d}, m = 0: {:e}'
                                       .format(l, self.coeffs[0, l, 0]))
                for m in _np.arange(1, l + 1):
                    if m % 2 == 1:
                        if (self.coeffs[0, l, m] != -
                                self.coeffs[1, l, m].conjugate()):
                            # Report the offending (l, m) coefficients;
                            # the original reported (l, 0) by mistake.
                            raise RuntimeError('Complex coefficients do not '
                                               'correspond to a real field. '
                                               'l = {:d}, m = {:d}: {:e}, {:e}'
                                               .format(
                                                   l, m, self.coeffs[0, l, m],
                                                   self.coeffs[1, l, m]))
                    else:
                        if (self.coeffs[0, l, m] !=
                                self.coeffs[1, l, m].conjugate()):
                            raise RuntimeError('Complex coefficients do not '
                                               'correspond to a real field. '
                                               'l = {:d}, m = {:d}: {:e}, {:e}'
                                               .format(
                                                   l, m, self.coeffs[0, l, m],
                                                   self.coeffs[1, l, m]))

        coeffs_rc = _np.zeros((2, self.lmax + 1, self.lmax + 1))
        coeffs_rc[0, :, :] = self.coeffs[0, :, :].real
        coeffs_rc[1, :, :] = self.coeffs[0, :, :].imag
        real_coeffs = _shtools.SHctor(coeffs_rc, convention=1,
                                      switchcs=0)
        return SHCoeffs.from_array(real_coeffs,
                                   normalization=self.normalization,
                                   csphase=self.csphase)

    def _rotate(self, angles, dj_matrix):
        """Rotate the coefficients by the Euler angles alpha, beta, gamma."""
        # Note that the current method is EXTREMELY inefficient. The complex
        # coefficients are expanded onto real and imaginary grids, each of
        # the two components are rotated separately as real data, the rotated
        # real data are re-expanded on new real and complex grids, they are
        # combined to make a complex grid, and the resultant is expanded
        # in complex spherical harmonics.
        if dj_matrix is None:
            dj_matrix = _shtools.djpi2(self.lmax + 1)

        cgrid = self.expand(grid='DH')
        rgrid, igrid = cgrid.data.real, cgrid.data.imag
        rgridcoeffs = _shtools.SHExpandDH(rgrid, norm=1, sampling=1, csphase=1)
        igridcoeffs = _shtools.SHExpandDH(igrid, norm=1, sampling=1, csphase=1)

        rgridcoeffs_rot = _shtools.SHRotateRealCoef(
            rgridcoeffs, angles, dj_matrix)
        igridcoeffs_rot = _shtools.SHRotateRealCoef(
            igridcoeffs, angles, dj_matrix)

        rgrid_rot = _shtools.MakeGridDH(rgridcoeffs_rot, norm=1,
                                        sampling=1, csphase=1)
        igrid_rot = _shtools.MakeGridDH(igridcoeffs_rot, norm=1,
                                        sampling=1, csphase=1)
        grid_rot = rgrid_rot + 1j * igrid_rot

        norm = self._norm_flag()
        coeffs_rot = _shtools.SHExpandDHC(grid_rot, norm=norm,
                                          csphase=self.csphase)
        return SHCoeffs.from_array(coeffs_rot,
                                   normalization=self.normalization,
                                   csphase=self.csphase, copy=False)

    def _expandDH(self, sampling, lmax, lmax_calc, extend):
        """Evaluate the coefficients on a Driscoll and Healy (1994) grid."""
        norm = self._norm_flag()
        data = _shtools.MakeGridDHC(self.coeffs, sampling=sampling,
                                    norm=norm, csphase=self.csphase, lmax=lmax,
                                    lmax_calc=lmax_calc, extend=extend)
        gridout = SHGrid.from_array(data, grid='DH', copy=False)
        return gridout

    def _expandGLQ(self, zeros, lmax, lmax_calc, extend):
        """Evaluate the coefficients on a Gauss-Legendre quadrature grid."""
        norm = self._norm_flag()
        if zeros is None:
            zeros, weights = _shtools.SHGLQ(self.lmax)
        data = _shtools.MakeGridGLQC(self.coeffs, zeros, norm=norm,
                                     csphase=self.csphase, lmax=lmax,
                                     lmax_calc=lmax_calc, extend=extend)
        gridout = SHGrid.from_array(data, grid='GLQ', copy=False)
        return gridout

    def _expand_coord(self, lat, lon, lmax_calc, degrees):
        """Evaluate the function at the coordinates lat and lon."""
        norm = self._norm_flag()

        if degrees is True:
            latin = lat
            lonin = lon
        else:
            latin = _np.rad2deg(lat)
            lonin = _np.rad2deg(lon)

        if type(lat) is not type(lon):
            raise ValueError('lat and lon must be of the same type. ' +
                             'Input types are {:s} and {:s}.'
                             .format(repr(type(lat)), repr(type(lon))))

        # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
        if type(lat) is int or type(lat) is float or \
                type(lat) is _np.float64:
            return _shtools.MakeGridPointC(self.coeffs, lat=latin, lon=lonin,
                                           lmax=lmax_calc, norm=norm,
                                           csphase=self.csphase)
        elif type(lat) is _np.ndarray:
            # The deprecated np.complex alias was removed in NumPy 1.24;
            # the builtin complex maps to complex128 here.
            values = _np.empty_like(lat, dtype=complex)
            for v, latitude, longitude in _np.nditer([values, latin, lonin],
                                                     op_flags=['readwrite']):
                v[...] = _shtools.MakeGridPointC(self.coeffs, lat=latitude,
                                                 lon=longitude,
                                                 lmax=lmax_calc, norm=norm,
                                                 csphase=self.csphase)
            return values
        elif type(lat) is list:
            values = []
            for latitude, longitude in zip(latin, lonin):
                values.append(
                    _shtools.MakeGridPointC(self.coeffs, lat=latitude,
                                            lon=longitude,
                                            lmax=lmax_calc, norm=norm,
                                            csphase=self.csphase))
            return values
        else:
            raise ValueError('lat and lon must be either an int, float, ' +
                             'ndarray, or list. ' +
                             'Input types are {:s} and {:s}.'
                             .format(repr(type(lat)), repr(type(lon))))
# =============================================================================
# ========= GRID CLASSES ================================================
# =============================================================================
class SHGrid(object):
"""
Class for spatial gridded data on the sphere.
Grids can be initialized from:
x = SHGrid.from_array(array)
x = SHGrid.from_xarray(data_array)
x = SHGrid.from_netcdf(netcdf)
x = SHGrid.from_file('fname.dat')
x = SHGrid.from_zeros(lmax)
x = SHGrid.from_cap(theta, clat, clon, lmax)
The class instance defines the following class attributes:
data : Gridded array of the data.
nlat, nlon : The number of latitude and longitude bands in the grid.
n : The number of samples in latitude for 'DH' grids.
lmax : The maximum spherical harmonic degree that can be resolved
by the grid sampling.
sampling : The longitudinal sampling for Driscoll and Healy grids. Either
1 for equally sampled grids (nlat=nlon) or 2 for equally
spaced grids in degrees.
kind : Either 'real' or 'complex' for the data type.
grid : Either 'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-
Legendre Quadrature grids.
zeros : The cos(colatitude) nodes used with Gauss-Legendre
Quadrature grids. Default is None.
weights : The latitudinal weights used with Gauss-Legendre
Quadrature grids. Default is None.
extend : True if the grid contains the redundant column for 360 E and
(for 'DH' grids) the unnecessary row for 90 S.
Each class instance provides the following methods:
to_array() : Return the raw gridded data as a numpy array.
to_xarray() : Return the gridded data as an xarray DataArray.
to_file() : Save gridded data to a text or binary file.
to_netcdf() : Return the gridded data as a netcdf formatted file or object.
to_real() : Return a new SHGrid class instance of the real component
of the data.
to_imag() : Return a new SHGrid class instance of the imaginary component
of the data.
lats() : Return a vector containing the latitudes of each row
of the gridded data.
lons() : Return a vector containing the longitudes of each column
of the gridded data.
expand() : Expand the grid into spherical harmonics.
max() : Return the maximum value of data using numpy.max().
min() : Return the minimum value of data using numpy.min().
copy() : Return a copy of the class instance.
plot() : Plot the data.
plotgmt() : Plot projected data using the generic mapping tools (GMT).
plot3d() : Plot the raw data on a 3d sphere.
info() : Print a summary of the data stored in the SHGrid instance.
"""
def __init__():
"""Unused constructor of the super class."""
print('Initialize the class using one of the class methods:\n'
'>>> pyshtools.SHGrid.from_array\n'
'>>> pyshtools.SHGrid.from_xarray\n'
'>>> pyshtools.SHGrid.from_netcdf\n'
'>>> pyshtools.SHGrid.from_file\n'
'>>> pyshtools.SHGrid.from_zeros\n'
'>>> pyshtools.SHGrid.from_cap\n')
# ---- Factory methods ----
@classmethod
def from_array(self, array, grid='DH', copy=True):
"""
Initialize the class instance from an input array.
Usage
-----
x = SHGrid.from_array(array, [grid, copy])
Returns
-------
x : SHGrid class instance
Parameters
----------
array : ndarray, shape (nlat, nlon)
2-D numpy array of the gridded data, where nlat and nlon are the
number of latitudinal and longitudinal bands, respectively.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
Quadrature grids, respectively.
copy : bool, optional, default = True
If True (default), make a copy of array when initializing the class
instance. If False, initialize the class instance with a reference
to array.
"""
if _np.iscomplexobj(array):
kind = 'complex'
else:
kind = 'real'
if type(grid) != str:
raise ValueError('grid must be a string. Input type is {:s}.'
.format(str(type(grid))))
if grid.upper() not in set(['DH', 'GLQ']):
raise ValueError(
"grid must be 'DH' or 'GLQ'. Input value is {:s}."
.format(repr(grid))
)
for cls in self.__subclasses__():
if cls.istype(kind) and cls.isgrid(grid):
return cls(array, copy=copy)
@classmethod
def from_zeros(self, lmax, grid='DH', kind='real', sampling=2,
extend=True):
"""
Initialize the class instance using an array of zeros.
Usage
-----
x = SHGrid.from_zeros(lmax, [grid, kind, sampling, extend])
Returns
-------
x : SHGrid class instance
Parameters
----------
lmax : int
The maximum spherical harmonic degree resolvable by the grid.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss Legendre
Quadrature grids, respectively.
kind : str, optional, default = 'real'
Either 'real' or 'complex' for the data type.
sampling : int, optional, default = 2
The longitudinal sampling for Driscoll and Healy grids. Either 1
for equally sampled grids (nlong=nlat) or 2 for equally spaced
grids in degrees (nlong=2*nlat with extend=False or nlong=2*nlat-1
with extend=True).
extend : bool, optional, default = True
If True, include the longitudinal band for 360 E (DH and GLQ grids)
and the latitudinal band for 90 S (DH grids only).
"""
if type(grid) != str:
raise ValueError('grid must be a string. Input type is {:s}.'
.format(str(type(grid))))
if grid.upper() not in set(['DH', 'GLQ']):
raise ValueError("grid must be 'DH' or 'GLQ'. " +
"Input value is {:s}.".format(repr(grid)))
if grid.upper() == 'DH':
nlat = 2 * lmax + 2
if sampling == 1:
nlon = nlat
else:
nlon = nlat * 2
if extend:
nlat += 1
nlon += 1
elif grid.upper() == 'GLQ':
nlat = lmax + 1
nlon = 2 * nlat - 1
if extend:
nlon += 1
if kind == 'real':
array = _np.zeros((nlat, nlon), dtype=_np.float_)
else:
array = _np.zeros((nlat, nlon), dtype=_np.complex_)
for cls in self.__subclasses__():
if cls.istype(kind) and cls.isgrid(grid):
return cls(array, copy=False)
@classmethod
def from_cap(self, theta, clat, clon, lmax, grid='DH', kind='real',
             sampling=2, degrees=True, extend=True):
    """
    Initialize the class instance with an array equal to unity within
    a spherical cap and zero elsewhere.

    Usage
    -----
    x = SHGrid.from_cap(theta, clat, clon, lmax, [grid, kind, sampling,
                                                  degrees, extend])

    Returns
    -------
    x : SHGrid class instance

    Parameters
    ----------
    theta : float
        The angular radius of the spherical cap, default in degrees.
    clat, clon : float
        Latitude and longitude of the center of the rotated spherical cap
        (default in degrees).
    lmax : int
        The maximum spherical harmonic degree resolvable by the grid.
    grid : str, optional, default = 'DH'
        'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
        Quadrature grids, respectively.
    kind : str, optional, default = 'real'
        Either 'real' or 'complex' for the data type.
    sampling : int, optional, default = 2
        The longitudinal sampling for Driscoll and Healy grids. Either 1
        for equally sampled grids (nlong=nlat) or 2 for equally spaced
        grids in degrees (nlong=2*nlat with extend=False or nlong=2*nlat-1
        with extend=True).
    degrees : bool, optional = True
        If True, theta, clat, and clon are in degrees.
    extend : bool, optional, default = True
        If True, include the longitudinal band for 360 E (DH and GLQ grids)
        and the latitudinal band for 90 S (DH grids only).
    """
    # Start from an all-zero grid and switch on the cells inside the cap.
    out = self.from_zeros(lmax, grid=grid, kind=kind, sampling=sampling,
                          extend=extend)

    if degrees is True:
        theta = _np.deg2rad(theta)
        clat = _np.deg2rad(clat)
        clon = _np.deg2rad(clon)

    # Restrict the search to the latitude rows that can intersect the cap.
    lat_nodes = out.lats(degrees=False)
    lon_nodes = out.lons(degrees=False)
    row_lo = _np.inf
    row_hi = 0
    for idx, node in enumerate(lat_nodes):
        if node <= clat + theta and idx <= row_lo:
            row_lo = idx
        if node >= clat - theta and idx >= row_hi:
            row_hi = idx

    # Cartesian unit vector of the cap center.
    cx = _np.cos(clat) * _np.cos(clon)
    cy = _np.cos(clat) * _np.sin(clon)
    cz = _np.sin(clat)
    coslon = _np.cos(lon_nodes)
    sinlon = _np.sin(lon_nodes)

    # Mark every node whose angular distance from the center is <= theta.
    for i in range(row_lo, row_hi + 1):
        coslat_i = _np.cos(lat_nodes[i])
        sinlat_i = _np.sin(lat_nodes[i])
        for j in range(out.nlon):
            cosdist = coslat_i * (cx * coslon[j] + cy * sinlon[j]) \
                + cz * sinlat_i
            if _np.arccos(cosdist) <= theta:
                out.data[i, j] = 1.

    return out
@classmethod
def from_file(self, fname, binary=False, grid='DH', **kwargs):
"""
Initialize the class instance from gridded data in a file.
Usage
-----
x = SHGrid.from_file(fname, [binary, grid, **kwargs])
Returns
-------
x : SHGrid class instance
Parameters
----------
fname : str
The filename containing the gridded data. For text files (default)
the file is read using the numpy routine loadtxt(), whereas for
binary files, the file is read using numpy.load(). For Driscoll and
Healy grids, the dimensions of the array must be nlon=nlat,
nlon=2*nlat or nlon=2*nlat-1. For Gauss-Legendre Quadrature grids,
the dimensions of the array must be nlon=2*nlat-1 or nlon=2*nlat.
binary : bool, optional, default = False
If False, read a text file. If True, read a binary 'npy' file.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
Quadrature grids, respectively.
**kwargs : keyword arguments, optional
Keyword arguments of numpy.loadtxt() or numpy.load().
"""
if binary is False:
data = _np.loadtxt(fname, **kwargs)
elif binary is True:
data = _np.load(fname, **kwargs)
else:
raise ValueError('binary must be True or False. '
'Input value is {:s}.'.format(binary))
return self.from_array(data, grid=grid, copy=False)
@classmethod
def from_xarray(self, data_array, grid='DH'):
"""
Initialize the class instance from an xarray DataArray object.
Usage
-----
x = SHGrid.from_xarray(data_array, [grid])
Returns
-------
x : SHGrid class instance
Parameters
----------
xarray : xarray DataArray
The xarray DataArray containing the gridded data. For Driscoll and
Healy grids, the dimensions of the array must be nlon=nlat,
nlon=2*nlat or nlon=2*nlat-1. For Gauss-Legendre Quadrature grids,
the dimensions of the array must be nlon=2*nlat-1 or nlon=2*nlat.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
Quadrature grids, respectively.
"""
return self.from_array(data_array.values, grid=grid)
@classmethod
def from_netcdf(self, netcdf, grid='DH'):
    """
    Initialize the class instance from a netcdf formatted file or object.

    Usage
    -----
    x = SHGrid.from_netcdf(netcdf, [grid])

    Returns
    -------
    x : SHGrid class instance

    Parameters
    ----------
    netcdf : str or netcdf object
        The name of a netcdf file or object.
    grid : str, optional, default = 'DH'
        'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
        Quadrature grids, respectively.
    """
    # Read via xarray, then hand the raw values to from_array.
    values = _xr.open_dataarray(netcdf).values
    return self.from_array(values, grid=grid)
def copy(self):
"""
Return a deep copy of the class instance.
Usage
-----
copy = x.copy()
"""
return _copy.deepcopy(self)
def to_file(self, filename, binary=False, **kwargs):
"""
Save gridded data to a file.
Usage
-----
x.to_file(filename, [binary, **kwargs])
Parameters
----------
filename : str
Name of output file. For text files (default), the file will be
saved automatically in gzip compressed format if the filename ends
in .gz.
binary : bool, optional, default = False
If False, save as text using numpy.savetxt(). If True, save as a
'npy' binary file using numpy.save().
**kwargs : keyword arguments, optional
Keyword arguments of numpy.savetxt() and numpy.save().
"""
if binary is False:
_np.savetxt(filename, self.data, **kwargs)
elif binary is True:
_np.save(filename, self.data, **kwargs)
else:
raise ValueError('binary must be True or False. '
'Input value is {:s}.'.format(binary))
def to_xarray(self, title=None, comment='pyshtools grid',
              long_name=None, units=None):
    """
    Return the gridded data as an xarray DataArray.

    Usage
    -----
    x.to_xarray([title, comment, long_name, units])

    Parameters
    ----------
    title : str, optional, default = None
        Title of the dataset.
    comment : str, optional, default = 'pyshtools grid'
        Additional information about how the data were generated.
    long_name : str, optional, default = None
        A long descriptive name of the gridded data, used to label a
        colorbar.
    units : str, optional, default = None
        Units of the gridded data, used to label a colorbar.
    """
    # Global attributes describing the grid (insertion order preserved).
    meta = {'actual_range': [self.min(), self.max()],
            'comment': comment,
            'nlat': self.nlat,
            'nlon': self.nlon,
            'lmax': self.lmax,
            'kind': self.kind,
            'grid': self.grid}
    if self.grid == 'GLQ':
        meta['zeros'] = self.zeros
        meta['weights'] = self.weights
    else:
        meta['sampling'] = self.sampling
    for key, value in (('title', title), ('long_name', long_name),
                       ('units', units)):
        if value is not None:
            meta[key] = value

    lat_values = self.lats()
    lon_values = self.lons()
    lat_attrs = {'long_name': 'latitude',
                 'units': 'degrees_north',
                 'actual_range': [lat_values[0], lat_values[-1]]}
    lon_attrs = {'long_name': 'longitude',
                 'units': 'degrees_east',
                 'actual_range': [lon_values[0], lon_values[-1]]}

    return _xr.DataArray(self.to_array(),
                         coords=[('lat', lat_values, lat_attrs),
                                 ('lon', lon_values, lon_attrs)],
                         attrs=meta)
def to_netcdf(self, filename=None, title=None, description=None,
              comment='pyshtools grid', name='data',
              long_name=None, units=None, dtype=None):
    """
    Return the gridded data as a netcdf formatted file or object.

    Usage
    -----
    x.to_netcdf([filename, title, description, comment, name, long_name,
                 units, dtype])

    Parameters
    ----------
    filename : str, optional, default = None
        Name of output file. If None, the serialized netcdf object is
        returned instead of being written to disk.
    title : str, optional, default = None
        Title of the dataset.
    description : str, optional, default = None
        Description of the dataset ('Remark' in gmt grd files).
    comment : str, optional, default = 'pyshtools grid'
        Additional information about how the data were generated.
    name : str, optional, default = 'data'
        Name of the data array.
    long_name : str, optional, default = None
        A long descriptive name of the gridded data.
    units : str, optional, default = None
        Units of the gridded data.
    dtype : str, optional, default = None
        If 'f', convert the grid to single precision 32-bit floating
        point numbers.

    Raises
    ------
    RuntimeError
        If the grid holds complex data, which netcdf cannot represent.
    """
    if self.kind == 'complex':
        raise RuntimeError('netcdf files do not support complex data '
                           'formats.')
    darray = self.to_xarray(title=title, comment=comment,
                            long_name=long_name, units=units)
    if dtype == 'f':
        # Downcast to single precision on request to halve file size.
        darray.values = darray.values.astype(_np.float32)
    # Only non-None metadata is written to the dataset attributes.
    attrs = {key: value
             for key, value in (('title', title),
                                ('description', description),
                                ('comment', comment))
             if value is not None}
    dataset = _xr.Dataset({name: darray}, attrs=attrs)
    if filename is None:
        return dataset.to_netcdf()
    dataset.to_netcdf(filename)
def to_array(self):
"""
Return the raw gridded data as a numpy array.
Usage
-----
grid = x.to_array()
Returns
-------
grid : ndarray, shape (nlat, nlon)
2-D numpy array of the gridded data.
"""
return _np.copy(self.data)
def to_real(self):
    """
    Return a new SHGrid class instance of the real component of the data.

    Usage
    -----
    grid = x.to_real()

    Returns
    -------
    grid : SHGrid class instance
    """
    # copy=False is safe: .real of a fresh to_array() copy is never
    # shared with self.data.
    real_part = self.to_array().real
    return SHGrid.from_array(real_part, grid=self.grid, copy=False)
def to_imag(self):
    """
    Return a new SHGrid class instance of the imaginary component of the
    data.

    Usage
    -----
    grid = x.to_imag()

    Returns
    -------
    grid : SHGrid class instance
    """
    # copy=False is safe: .imag of a fresh to_array() copy is never
    # shared with self.data.
    imag_part = self.to_array().imag
    return SHGrid.from_array(imag_part, grid=self.grid, copy=False)
# ---- Mathematical operators ----
def min(self):
"""
Return the minimum value of self.data using numpy.min().
Usage
-----
x.min()
"""
return _np.min(self.data)
def max(self):
"""
Return the maximum value of self.data using numpy.max().
Usage
-----
x.max()
"""
return _np.max(self.data)
def __add__(self, other):
    """Add two similar grids or a grid and a scalar: self + other."""
    if isinstance(other, SHGrid):
        compatible = (self.grid == other.grid and
                      self.data.shape == other.data.shape and
                      self.kind == other.kind)
        if not compatible:
            raise ValueError('The two grids must be of the '
                             'same kind and have the same shape.')
        return SHGrid.from_array(self.data + other.data, grid=self.grid)
    if _np.isscalar(other):
        # Adding a complex scalar would silently promote a real grid.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not add a complex constant to a '
                             'real grid.')
        return SHGrid.from_array(self.data + other, grid=self.grid)
    raise NotImplementedError('Mathematical operator not implemented '
                              'for these operands.')
def __radd__(self, other):
"""Add two similar grids or a grid and a scaler: self + other."""
return self.__add__(other)
def __sub__(self, other):
    """Subtract two similar grids or a grid and a scalar: self - other."""
    if isinstance(other, SHGrid):
        compatible = (self.grid == other.grid and
                      self.data.shape == other.data.shape and
                      self.kind == other.kind)
        if not compatible:
            raise ValueError('The two grids must be of the '
                             'same kind and have the same shape.')
        return SHGrid.from_array(self.data - other.data, grid=self.grid)
    if _np.isscalar(other):
        # A complex scalar would silently promote a real grid.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not subtract a complex constant from '
                             'a real grid.')
        return SHGrid.from_array(self.data - other, grid=self.grid)
    raise NotImplementedError('Mathematical operator not implemented '
                              'for these operands.')
def __rsub__(self, other):
    """Subtract two similar grids or a grid and a scalar: other - self."""
    if isinstance(other, SHGrid):
        compatible = (self.grid == other.grid and
                      self.data.shape == other.data.shape and
                      self.kind == other.kind)
        if not compatible:
            raise ValueError('The two grids must be of the '
                             'same kind and have the same shape.')
        return SHGrid.from_array(other.data - self.data, grid=self.grid)
    if _np.isscalar(other):
        # A complex scalar would silently promote a real grid.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not subtract a complex constant from '
                             'a real grid.')
        return SHGrid.from_array(other - self.data, grid=self.grid)
    raise NotImplementedError('Mathematical operator not implemented '
                              'for these operands.')
def __mul__(self, other):
    """Multiply two similar grids or a grid and a scalar: self * other."""
    if isinstance(other, SHGrid):
        compatible = (self.grid == other.grid and
                      self.data.shape == other.data.shape and
                      self.kind == other.kind)
        if not compatible:
            raise ValueError('The two grids must be of the '
                             'same kind and have the same shape.')
        return SHGrid.from_array(self.data * other.data, grid=self.grid)
    if _np.isscalar(other):
        # A complex scalar would silently promote a real grid.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not multiply a real grid by a complex '
                             'constant.')
        return SHGrid.from_array(self.data * other, grid=self.grid)
    raise NotImplementedError('Mathematical operator not implemented '
                              'for these operands.')
def __rmul__(self, other):
"""Multiply two similar grids or a grid and a scaler: other * self."""
return self.__mul__(other)
def __truediv__(self, other):
    """Divide two similar grids or a grid and a scalar: self / other."""
    if isinstance(other, SHGrid):
        compatible = (self.grid == other.grid and
                      self.data.shape == other.data.shape and
                      self.kind == other.kind)
        if not compatible:
            raise ValueError('The two grids must be of the '
                             'same kind and have the same shape.')
        return SHGrid.from_array(self.data / other.data, grid=self.grid)
    if _np.isscalar(other):
        # A complex scalar would silently promote a real grid.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not divide a real grid by a complex '
                             'constant.')
        return SHGrid.from_array(self.data / other, grid=self.grid)
    raise NotImplementedError('Mathematical operator not implemented '
                              'for these operands.')
def __pow__(self, other):
    """Raise a grid to a scalar power: pow(self, other)."""
    # Grid ** grid is deliberately unsupported; only scalar exponents.
    if not _np.isscalar(other):
        raise NotImplementedError('Mathematical operator not implemented '
                                  'for these operands.')
    return SHGrid.from_array(pow(self.data, other), grid=self.grid)
def __abs__(self):
    """Return the absolute value of the gridded data."""
    magnitude = abs(self.data)
    return SHGrid.from_array(magnitude, grid=self.grid)
def __repr__(self):
str = ('kind = {:s}\n'
'grid = {:s}\n'.format(repr(self.kind), repr(self.grid)))
if self.grid == 'DH':
str += ('n = {:d}\n'
'sampling = {:d}\n'.format(self.n, self.sampling))
str += ('nlat = {:d}\n'
'nlon = {:d}\n'
'lmax = {:d}\n'
'extend = {}'.format(self.nlat, self.nlon, self.lmax,
self.extend))
return str
# ---- Extract grid properties ----
def lats(self, degrees=True):
"""
Return the latitudes of each row of the gridded data.
Usage
-----
lats = x.lats([degrees])
Returns
-------
lats : ndarray, shape (nlat)
1-D numpy array of size nlat containing the latitude of each row
of the gridded data.
Parameters
-------
degrees : bool, optional, default = True
If True, the output will be in degrees. If False, the output will
be in radians.
"""
if degrees is False:
return _np.radians(self._lats())
else:
return self._lats()
def lons(self, degrees=True):
"""
Return the longitudes of each column of the gridded data.
Usage
-----
lons = x.get_lon([degrees])
Returns
-------
lons : ndarray, shape (nlon)
1-D numpy array of size nlon containing the longitude of each row
of the gridded data.
Parameters
-------
degrees : bool, optional, default = True
If True, the output will be in degrees. If False, the output will
be in radians.
"""
if degrees is False:
return _np.radians(self._lons())
else:
return self._lons()
# ---- Plotting routines ----
def plot3d(self, elevation=20, azimuth=30, cmap='RdBu_r', show=True,
           fname=None):
    """
    Plot the raw data on a 3d sphere.

    This routine becomes slow for large grids because it is based on
    matplotlib3d.

    Usage
    -----
    x.plot3d([elevation, azimuth, show, fname])

    Parameters
    ----------
    elevation : float, optional, default = 20
        elev parameter for the 3d projection.
    azimuth : float, optional, default = 30
        azim parameter for the 3d projection.
    cmap : str, optional, default = 'RdBu_r'
        Name of the color map to use.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, save the image to the specified file.

    Returns
    -------
    (fig, ax3d) : the matplotlib figure and 3d axes objects.
    """
    # Importing Axes3D registers the '3d' projection with matplotlib.
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
    nlat, nlon = self.nlat, self.nlon
    cmap = _plt.get_cmap(cmap)
    if self.kind == 'real':
        data = self.data
    elif self.kind == 'complex':
        # Only the magnitude of complex data is displayed.
        data = _np.abs(self.data)
    else:
        raise ValueError('Grid has to be either real or complex, not {}.'
                         .format(self.kind))
    lats = self.lats()
    lons = self.lons()
    # Close the mesh over the poles; DH grids lack the south pole row,
    # GLQ grids lack both poles.
    # NOTE(review): lats_circular would be unbound for a grid type other
    # than 'DH' or 'GLQ' — presumably those are the only possibilities.
    if self.grid == 'DH':
        # add south pole
        lats_circular = _np.append(lats, [-90.])
    elif self.grid == 'GLQ':
        # add north and south pole
        lats_circular = _np.hstack(([90.], lats, [-90.]))
    # Repeat the 0-degree meridian so the mesh wraps around in longitude.
    lons_circular = _np.append(lons, [lons[0]])
    nlats_circular = len(lats_circular)
    nlons_circular = len(lons_circular)
    sshape = nlats_circular, nlons_circular
    # make uv sphere and store all points
    u = _np.radians(lons_circular)
    v = _np.radians(90. - lats_circular)
    x = _np.sin(v)[:, None] * _np.cos(u)[None, :]
    y = _np.sin(v)[:, None] * _np.sin(u)[None, :]
    z = _np.cos(v)[:, None] * _np.ones_like(lons_circular)[None, :]
    points = _np.vstack((x.flatten(), y.flatten(), z.flatten()))
    # fill data for all points. 0 lon has to be repeated (circular mesh)
    # and the south pole has to be added in the DH grid
    if self.grid == 'DH':
        magn_point = _np.zeros((nlat + 1, nlon + 1))
        magn_point[:-1, :-1] = data
        magn_point[-1, :] = _np.mean(data[-1])  # not exact !
        magn_point[:-1, -1] = data[:, 0]
    if self.grid == 'GLQ':
        magn_point = _np.zeros((nlat + 2, nlon + 1))
        magn_point[1:-1, :-1] = data
        magn_point[0, :] = _np.mean(data[0])  # not exact !
        magn_point[-1, :] = _np.mean(data[-1])  # not exact !
        magn_point[1:-1, -1] = data[:, 0]
    # compute face color, which is the average of all neighbour points
    magn_face = 1./4. * (magn_point[1:, 1:] + magn_point[:-1, 1:] +
                         magn_point[1:, :-1] + magn_point[:-1, :-1])
    magnmax_face = _np.max(_np.abs(magn_face))
    magnmax_point = _np.max(_np.abs(magn_point))
    # compute colours and displace the points
    norm = _plt.Normalize(-magnmax_face / 2., magnmax_face / 2., clip=True)
    colors = cmap(norm(magn_face.flatten()))
    colors = colors.reshape(nlats_circular - 1, nlons_circular - 1, 4)
    # Radially displace each vertex in proportion to its data value.
    points *= (1. + magn_point.flatten() / magnmax_point / 2.)
    x = points[0].reshape(sshape)
    y = points[1].reshape(sshape)
    z = points[2].reshape(sshape)
    # plot 3d radiation pattern
    fig = _plt.figure()
    ax3d = fig.add_subplot(1, 1, 1, projection='3d')
    ax3d.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=colors)
    ax3d.set(xlim=(-1., 1.), ylim=(-1., 1.), zlim=(-1., 1.),
             xticks=[-1, 1], yticks=[-1, 1], zticks=[-1, 1])
    ax3d.set_axis_off()
    ax3d.view_init(elev=elevation, azim=azimuth)
    # show or save output
    fig.tight_layout(pad=0.5)
    if show:
        fig.show()
    if fname is not None:
        fig.savefig(fname)
    return fig, ax3d
def plot(self, projection=None, tick_interval=[30, 30], ticks='WSen',
         minor_tick_interval=[None, None], title=None, titlesize=None,
         colorbar=None, cmap='viridis', cmap_limits=None,
         cmap_limits_complex=None, cmap_reverse=False,
         cb_triangles='neither', cb_label=None, cb_ylabel=None,
         cb_tick_interval=None, cb_minor_tick_interval=None,
         cb_offset=None, cb_width=None, grid=False, axes_labelsize=None,
         tick_labelsize=None, xlabel=True, ylabel=True, ax=None, ax2=None,
         show=True, fname=None):
    """
    Plot the data using a Cartopy projection or a matplotlib cylindrical
    projection.

    Usage
    -----
    fig, ax = x.plot([projection, tick_interval, minor_tick_interval,
                      ticks, xlabel, ylabel, title, colorbar, cmap,
                      cmap_limits, cmap_limits_complex, cmap_reverse,
                      cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
                      cb_minor_tick_interval, cb_offset, cb_width, grid,
                      titlesize, axes_labelsize, tick_labelsize, ax, ax2,
                      show, fname])

    Parameters
    ----------
    projection : Cartopy projection class, optional, default = None
        The Cartopy projection class used to project the gridded data,
        for Driscoll and Healy sampled grids only.
    tick_interval : list or tuple, optional, default = [30, 30]
        Intervals to use when plotting the x and y ticks. If set to None,
        ticks will not be plotted.
    minor_tick_interval : list or tuple, optional, default = [None, None]
        Intervals to use when plotting the minor x and y ticks. If set to
        None, minor ticks will not be plotted.
    ticks : str, optional, default = 'WSen'
        Specify which axes should have ticks drawn and annotated. Capital
        letters plot the ticks and annotations, whereas small letters plot
        only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
        and north boundaries of the plot.
    xlabel : str, optional, default = 'Longitude' or 'GLQ longitude index'
        Label for the longitude axis.
    ylabel : str, optional, default = 'Latitude' or 'GLQ latitude index'
        Label for the latitude axis.
    title : str or list, optional, default = None
        The title of the plot. If the grid is complex, title should be a
        list of strings for the real and complex components.
    colorbar : str, optional, default = None
        Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
    cmap : str, optional, default = 'viridis'
        The color map to use when plotting the data and colorbar.
    cmap_limits : list, optional, default = [self.min(), self.max()]
        Set the lower and upper limits of the data used by the colormap,
        and optionally an interval for each color band. If the
        interval is specified, the number of discrete colors will be
        (cmap_limits[1]-cmap_limits[0])/cmap_limits[2]. If the data are
        complex, these limits will be used for the real component.
    cmap_limits_complex : list, optional, default = None
        Set the lower and upper limits of the imaginary component of the
        data used by the colormap, and optionally an interval for each
        color band.
    cmap_reverse : bool, optional, default = False
        Set to True to reverse the sense of the color progression in the
        color table.
    cb_triangles : str, optional, default = 'neither'
        Add triangles to the edges of the colorbar for minimum and maximum
        values. Can be 'neither', 'both', 'min', or 'max'.
    cb_label : str, optional, default = None
        Text label for the colorbar.
    cb_ylabel : str, optional, default = None
        Text label for the y axis of the colorbar
    cb_tick_interval : float, optional, default = None
        Colorbar major tick and annotation interval.
    cb_minor_tick_interval : float, optional, default = None
        Colorbar minor tick interval.
    cb_offset : float or int, optional, default = None
        Offset of the colorbar from the map edge in points. If None,
        the offset will be calculated automatically.
    cb_width : float, optional, default = None
        Width of the colorbar in percent with respect to the width of the
        respective image axis. Defaults are 2.5 and 5 for vertical and
        horizontal colorbars, respectively.
    grid : bool, optional, default = False
        If True, plot major grid lines.
    titlesize : int, optional, default = None
        The font size of the title.
    axes_labelsize : int, optional, default = None
        The font size for the x and y axes labels.
    tick_labelsize : int, optional, default = None
        The font size for the x and y tick labels.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear. If the
        grid is complex, the real component of the grid will be plotted
        on this axes.
    ax2 : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear. If the
        grid is complex, the complex component of the grid will be plotted
        on this axes.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    """
    # NOTE(review): the mutable defaults [30, 30] and [None, None] are
    # never mutated below (only rebound), so they are safe here.
    if projection is not None:
        if _cartopy_module:
            if not isinstance(projection, _ccrs.Projection):
                raise ValueError('The input projection must be an '
                                 'instance of cartopy.crs.Projection.')
        else:
            raise ImportError('When using map projections, plot() '
                              'requires installation of Cartopy.')
        if self.grid != 'DH':
            raise ValueError('Map projections are supported only for '
                             'DH grid types.')
    if tick_interval is None:
        tick_interval = [None, None]
    if minor_tick_interval is None:
        minor_tick_interval = [None, None]
    # Resolve font sizes from matplotlib rcParams when not given;
    # the symbolic value 'medium' is replaced by the numeric font.size.
    if axes_labelsize is None:
        axes_labelsize = _mpl.rcParams['axes.labelsize']
        if axes_labelsize == 'medium':
            axes_labelsize = _mpl.rcParams['font.size']
    if tick_labelsize is None:
        tick_labelsize = _mpl.rcParams['xtick.labelsize']
        if tick_labelsize == 'medium':
            tick_labelsize = _mpl.rcParams['font.size']
    if titlesize is None:
        titlesize = _mpl.rcParams['axes.titlesize']
    if self.kind == 'complex' and title is None:
        title = ['Real component', 'Imaginary component']
    # xlabel/ylabel default to grid-appropriate axis names.
    if xlabel is True:
        if self.grid == 'DH':
            xlabel = 'Longitude'
        else:
            xlabel = 'GLQ longitude index'
    if ylabel is True:
        if self.grid == 'DH':
            ylabel = 'Latitude'
        else:
            ylabel = 'GLQ latitude index'
    if colorbar is not None:
        if colorbar not in set(['top', 'bottom', 'left', 'right']):
            raise ValueError("colorbar must be 'top', 'bottom', 'left' or "
                             "'right'. Input value is {:s}."
                             .format(repr(colorbar)))
    if ax is None and ax2 is None:
        # No axes supplied: _plot creates and returns the figure.
        fig, axes = self._plot(
            projection=projection, colorbar=colorbar,
            cb_triangles=cb_triangles, cb_label=cb_label, grid=grid,
            axes_labelsize=axes_labelsize, tick_labelsize=tick_labelsize,
            title=title, titlesize=titlesize, xlabel=xlabel, ylabel=ylabel,
            tick_interval=tick_interval, ticks=ticks,
            minor_tick_interval=minor_tick_interval,
            cb_tick_interval=cb_tick_interval, cb_ylabel=cb_ylabel,
            cb_minor_tick_interval=cb_minor_tick_interval, cmap=cmap,
            cmap_limits=cmap_limits, cb_offset=cb_offset,
            cb_width=cb_width, cmap_limits_complex=cmap_limits_complex,
            cmap_reverse=cmap_reverse)
    else:
        # Caller supplied axes: complex grids need both ax and ax2.
        if self.kind == 'complex':
            if (ax is None and ax2 is not None) or (ax2 is None and
                                                    ax is not None):
                raise ValueError('For complex grids, one must specify '
                                 'both optional arguments ax and ax2.')
        self._plot(projection=projection,
                   ax=ax, ax2=ax2, colorbar=colorbar,
                   cb_triangles=cb_triangles, cb_label=cb_label,
                   grid=grid, axes_labelsize=axes_labelsize,
                   tick_labelsize=tick_labelsize, title=title,
                   xlabel=xlabel, ylabel=ylabel,
                   tick_interval=tick_interval, ticks=ticks,
                   minor_tick_interval=minor_tick_interval,
                   titlesize=titlesize, cmap=cmap, cb_offset=cb_offset,
                   cb_tick_interval=cb_tick_interval,
                   cb_minor_tick_interval=cb_minor_tick_interval,
                   cmap_limits=cmap_limits, cb_ylabel=cb_ylabel,
                   cb_width=cb_width,
                   cmap_limits_complex=cmap_limits_complex,
                   cmap_reverse=cmap_reverse)
    # Only when this call created the figure do we finalize/show/save it
    # and return the handles; with caller-supplied axes, return None.
    if ax is None:
        fig.tight_layout(pad=0.5)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
def plotgmt(self, fig=None, projection='mollweide', region='g',
            width=None, unit='i', central_latitude=0, central_longitude=0,
            grid=[30, 30], tick_interval=[30, 30],
            minor_tick_interval=[None, None], ticks='WSen', title=None,
            cmap='viridis', cmap_limits=None, cmap_limits_complex=None,
            cmap_reverse=False, cmap_continuous=False, colorbar=None,
            cb_triangles='both', cb_label=None, cb_ylabel=None,
            cb_tick_interval=None, cb_minor_tick_interval=None,
            cb_offset=None, horizon=60, offset=[None, None], fname=None):
    """
    Plot projected data using the Generic Mapping Tools (pygmt).

    To display the figure in a jupyter notebook, use
        fig.show()
    To display the figure in the terminal environment, use
        fig.show(method='external')

    Usage
    -----
    fig = x.plotgmt([fig, projection, region, width, unit,
                     central_latitude, central_longitude, grid,
                     tick_interval, minor_tick_interval, ticks, title,
                     cmap, cmap_limits, cmap_limits_complex, cmap_reverse,
                     cmap_continuous, colorbar, cb_triangles, cb_label,
                     cb_ylabel, cb_tick_interval, cb_minor_tick_interval,
                     cb_offset, horizon, offset, fname])

    Returns
    -------
    fig : pygmt.figure.Figure class instance
        Only returned when the `fig` argument is None.

    Parameters
    ----------
    fig : pygmt.Figure() class instance, optional, default = None
        If provided, the plot will be placed in a pre-existing figure.
    projection : str, optional, default = 'mollweide'
        The name of a global or hemispherical projection (see Notes). Only
        the first three characters are necessary to identify the
        projection.
    region : str or list, optional, default = 'g'
        The map region, consisting of a list [West, East, South, North] in
        degrees. The default 'g' specifies the entire sphere.
    width : float, optional, default = mpl.rcParams['figure.figsize'][0]
        The width of the projected image.
    unit : str, optional, default = 'i'
        The measurement unit of the figure width and offset: 'i' for
        inches or 'c' for cm.
    central_longitude : float, optional, default = 0
        The central meridian or center of the projection.
    central_latitude : float, optional, default = 0
        The center of the projection used with hemispheric projections, or
        the standard parallel used with cylindrical projections.
    grid : list, optional, default = [30, 30]
        Grid line interval [longitude, latitude] in degrees. If None, grid
        lines will not be plotted for that axis. If true, gridlines will be
        plotted at the same interval as the major ticks.
    tick_interval : list or tuple, optional, default = [30, 30]
        Intervals to use when plotting the x and y ticks. If set to None,
        ticks will not be plotted.
    minor_tick_interval : list or tuple, optional, default = [None, None]
        Intervals to use when plotting the minor x and y ticks. If set to
        None, minor ticks will not be plotted.
    ticks : str, optional, default = 'WSen'
        Specify which axes should have ticks drawn and annotated. Capital
        letters will plot the ticks and annotations, whereas small letters
        will plot only the ticks. 'W', 'S', 'E', and 'N' denote the west,
        south, east and north boundaries of the plot.
    title : str, optional, default = None
        The title to be displayed above the plot.
    cmap : str, optional, default = 'viridis'
        The color map to use when plotting the data and colorbar.
    cmap_limits : list, optional, default = [self.min(), self.max()]
        Set the lower and upper limits of the data used by the colormap
        when plotting, and optionally an interval for each color.
    cmap_limits_complex : list, optional, default = None
        Set the lower and upper limits of the imaginary component of the
        data used by the colormap, and optionally an interval for each
        color band.
    cmap_reverse : bool, optional, default = False
        Set to True to reverse the sense of the color progression in the
        color table.
    cmap_continuous : bool, optional, default = False
        If True, create a continuous colormap. Default behavior is to
        use constant colors for each interval.
    colorbar : str, optional, default = None
        Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
    cb_triangles : str, optional, default = 'both'
        Add triangles to the edges of the colorbar for minimum and maximum
        values. Can be 'neither', 'both', 'min', or 'max'.
    cb_label : str, optional, default = None
        Text label for the colorbar.
    cb_ylabel : str, optional, default = None
        Text label for the y axis of the colorbar
    cb_tick_interval : float, optional, default = None
        Annotation interval on the colorbar.
    cb_minor_tick_interval : float, optional, default = None
        Colorbar minor tick interval.
    cb_offset : float or int, optional, default = None
        Offset of the colorbar from the map edge in points. If None,
        the offset will be calculated automatically.
    horizon : float, optional, default = 60
        The horizon (number of degrees from the center to the edge) used
        with the Gnomonic projection.
    offset : list, optional, default = [None, None]
        Offset of the plot in the x and y directions from the current
        origin.
    fname : str, optional, default = None
        If present, save the image to the specified file.

    Notes
    -----
    Global and hemispherical projections (region='g') with corresponding
    abbreviation used by `projection`:

    Azimuthal projections
        Lambert-azimuthal-equal-area (lam)
        Stereographic-equal-angle (ste)
        Orthographic (ort)
        Azimuthal-equidistant (azi)
        Gnomonic (gno)
    Cylindrical projections (case sensitive)
        cylindrical-equidistant (cyl)
        Cylindrical-equal-area (Cyl)
        CYLindrical-stereographic (CYL)
        Miller-cylindrical (mil)
    Miscellaneous projections
        Mollweide (mol)
        Hammer (ham)
        Winkel-Tripel (win)
        Robinson (rob)
        Eckert (eck)
        Sinusoidal (sin)
        Van-der-Grinten (van)
    """
    if not _pygmt_module:
        raise ImportError('plotgmt() requires installation of the module '
                          'pygmt.')
    if tick_interval is None:
        tick_interval = [None, None]
    if minor_tick_interval is None:
        minor_tick_interval = [None, None]
    if self.kind == 'complex' and title is None:
        title = ['Real component', 'Imaginary component']
    if grid is True:
        # grid=True means: draw grid lines at the major tick interval.
        grid = tick_interval
    if width is None:
        width = _mpl.rcParams['figure.figsize'][0]
    if colorbar is not None:
        if colorbar not in set(['top', 'bottom', 'left', 'right']):
            raise ValueError("colorbar must be 'top', 'bottom', 'left' or "
                             "'right'. Input value is {:s}."
                             .format(repr(colorbar)))
    figure = self._plot_pygmt(
        fig=fig, projection=projection, region=region, width=width,
        unit=unit, central_latitude=central_latitude,
        central_longitude=central_longitude, grid=grid,
        tick_interval=tick_interval,
        minor_tick_interval=minor_tick_interval, ticks=ticks, title=title,
        cmap=cmap, cmap_limits=cmap_limits,
        cmap_limits_complex=cmap_limits_complex, cmap_reverse=cmap_reverse,
        cmap_continuous=cmap_continuous, colorbar=colorbar,
        cb_triangles=cb_triangles, cb_label=cb_label, cb_ylabel=cb_ylabel,
        cb_tick_interval=cb_tick_interval, cb_offset=cb_offset,
        cb_minor_tick_interval=cb_minor_tick_interval, horizon=horizon,
        offset=offset)
    if fname is not None:
        figure.savefig(fname)
    # When drawing into a caller-supplied figure, nothing is returned.
    if fig is None:
        return figure
def expand(self, normalization='4pi', csphase=1, **kwargs):
    """
    Expand the grid into spherical harmonics.

    Usage
    -----
    clm = x.expand([normalization, csphase, lmax_calc])

    Returns
    -------
    clm : SHCoeffs class instance

    Parameters
    ----------
    normalization : str, optional, default = '4pi'
        Normalization of the output class: '4pi', 'ortho', 'schmidt', or
        'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
        semi-normalized, or unnormalized coefficients, respectively.
    csphase : int, optional, default = 1
        Condon-Shortley phase convention: 1 to exclude the phase factor,
        or -1 to include it.
    lmax_calc : int, optional, default = x.lmax
        Maximum spherical harmonic degree to return.

    Raises
    ------
    ValueError
        If normalization is not a string, names an unknown convention,
        or if csphase is neither 1 nor -1.

    Notes
    -----
    When expanding a Driscoll and Healy (1994) sampled grid (grid='DH' or
    'DH2') into spherical harmonic coefficients, the latitudinal bands at
    90 N and S are downweighted to zero and have no influence on the
    returned spherical harmonic coefficients.
    """
    # isinstance (rather than an exact type comparison) also accepts
    # str subclasses while still rejecting every non-string input.
    if not isinstance(normalization, str):
        raise ValueError('normalization must be a string. ' +
                         'Input type is {:s}.'
                         .format(str(type(normalization))))
    if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
        raise ValueError(
            "The normalization must be '4pi', 'ortho', 'schmidt', " +
            "or 'unnorm'. Input value is {:s}."
            .format(repr(normalization))
        )
    if csphase != 1 and csphase != -1:
        raise ValueError(
            "csphase must be either 1 or -1. Input value is {:s}."
            .format(repr(csphase))
        )
    # Delegate to the grid-type-specific implementation.
    return self._expand(normalization=normalization, csphase=csphase,
                        **kwargs)
def info(self):
    """
    Print a summary of the data stored in the SHGrid instance.

    Usage
    -----
    x.info()
    """
    summary = repr(self)
    print(summary)
# ---- Real Driscoll and Healy grid class ----
class DHRealGrid(SHGrid):
"""Class for real Driscoll and Healy (1994) grids."""
@staticmethod
def istype(kind):
    # Factory predicate: True when this subclass handles the given data
    # kind ('real').
    return kind == 'real'
@staticmethod
def isgrid(grid):
    # Factory predicate: True when this subclass handles the given grid
    # type ('DH', Driscoll and Healy sampling).
    return grid == 'DH'
def __init__(self, array, copy=True):
self.nlat, self.nlon = array.shape
if self.nlat % 2 != 0:
self.n = self.nlat - 1
self.extend = True
else:
self.n = self.nlat
self.extend = False
if self.nlon == 2 * self.nlat - self.extend:
self.sampling = 2
elif self.nlon == self.nlat:
self.sampling = 1
else:
raise ValueError('Input array has shape (nlat={:d}, nlon={:d}) '
.format(self.nlat, self.nlon) +
'but needs nlon=nlat, nlon=2*nlat, or '
'nlon=2*nlat-1.'
)
self.lmax = int(self.n / 2 - 1)
self.grid = 'DH'
self.kind = 'real'
if copy:
self.data = _np.copy(array)
else:
self.data = array
def _lats(self):
"""Return the latitudes (in degrees) of the gridded data."""
if self.extend:
lats = _np.linspace(90.0, -90.0, num=self.nlat)
else:
lats = _np.linspace(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)
return lats
def _lons(self):
"""Return the longitudes (in degrees) of the gridded data."""
if self.extend:
lons = _np.linspace(0.0, 360.0, num=self.nlon)
else:
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons
def _expand(self, normalization, csphase, **kwargs):
    """Expand the grid into real spherical harmonics."""
    # Map the normalization name to the integer code used by SHExpandDH.
    norm_codes = {'4pi': 1, 'schmidt': 2, 'unnorm': 3, 'ortho': 4}
    key = normalization.lower()
    if key not in norm_codes:
        raise ValueError(
            "The normalization must be '4pi', 'ortho', 'schmidt', " +
            "or 'unnorm'. Input value is {:s}."
            .format(repr(normalization))
        )
    norm = norm_codes[key]
    # Extended grids duplicate the south-pole row and the 0-degree
    # meridian; strip them (extend acts as 0/1) before expanding.
    rows = self.nlat - self.extend
    cols = self.nlon - self.extend
    cilm = _shtools.SHExpandDH(self.data[:rows, :cols],
                               norm=norm, csphase=csphase,
                               sampling=self.sampling, **kwargs)
    return SHCoeffs.from_array(cilm,
                               normalization=key,
                               csphase=csphase, copy=False)
    def _plot(self, projection=None, xlabel=None, ylabel=None, ax=None,
              ax2=None, colorbar=None, cb_triangles=None, cb_label=None,
              grid=False, axes_labelsize=None, tick_labelsize=None, title=None,
              titlesize=None, cmap=None, tick_interval=None, ticks=None,
              minor_tick_interval=None, cb_tick_interval=None, cb_ylabel=None,
              cb_minor_tick_interval=None, cmap_limits=None, cmap_reverse=None,
              cmap_limits_complex=None, cb_offset=None, cb_width=None):
        """Plot the data as a matplotlib cylindrical projection,
        or with Cartopy when projection is specified.

        Returns (fig, axes) when ax is None; otherwise draws into the
        supplied axes and returns None.
        """
        # Build a figure if no axes were supplied; widen the vertical scale
        # when a horizontal colorbar needs extra room below/above the map.
        if ax is None:
            if colorbar is not None:
                if colorbar in set(['top', 'bottom']):
                    scale = 0.67
                else:
                    scale = 0.5
            else:
                scale = 0.55
            figsize = (_mpl.rcParams['figure.figsize'][0],
                       _mpl.rcParams['figure.figsize'][0] * scale)
            fig = _plt.figure(figsize=figsize)
            if projection is not None:
                axes = fig.add_subplot(111, projection=projection)
            else:
                axes = fig.add_subplot(111)
        else:
            if projection is not None:
                # Cartopy projections cannot be set on an existing axes;
                # replace the axes in place, preserving its grid position.
                fig = ax.get_figure()
                geometry = ax.get_geometry()
                ax.remove()
                axes = fig.add_subplot(*geometry, projection=projection)
            else:
                axes = ax
        # Major and minor tick positions; an interval of None disables them.
        if tick_interval[0] is None:
            xticks = []
        else:
            xticks = _np.linspace(0, 360, num=360//tick_interval[0]+1,
                                  endpoint=True)
        if tick_interval[1] is None:
            yticks = []
        else:
            yticks = _np.linspace(-90, 90, num=180//tick_interval[1]+1,
                                  endpoint=True)
        if minor_tick_interval[0] is None:
            minor_xticks = []
        else:
            minor_xticks = _np.linspace(
                0, 360, num=360//minor_tick_interval[0]+1, endpoint=True)
        if minor_tick_interval[1] is None:
            minor_yticks = []
        else:
            minor_yticks = _np.linspace(
                -90, 90, num=180//minor_tick_interval[1]+1, endpoint=True)
        # make colormap
        if cmap_limits is None:
            cmap_limits = [self.min(), self.max()]
        if len(cmap_limits) == 3:
            # Three-element limits: [vmin, vmax, interval] selects a
            # discretized colormap with one color per interval.
            num = int((cmap_limits[1] - cmap_limits[0]) / cmap_limits[2])
            if isinstance(cmap, _mpl.colors.Colormap):
                # NOTE(review): _resample is a private matplotlib API
                # (renamed to resampled in newer releases) — confirm the
                # supported matplotlib versions.
                cmap_scaled = cmap._resample(num)
            else:
                cmap_scaled = _mpl.cm.get_cmap(cmap, num)
        else:
            cmap_scaled = _mpl.cm.get_cmap(cmap)
        if cmap_reverse:
            cmap_scaled = cmap_scaled.reversed()
        # compute colorbar ticks
        cb_ticks = None
        cb_minor_ticks = None
        vmin = cmap_limits[0]
        vmax = cmap_limits[1]
        # Start/stop are rounded inward so that every tick lies within
        # [vmin, vmax]; the sign handling keeps floor division correct
        # for negative bounds.
        if cb_tick_interval is not None:
            if _np.sign(vmin) == -1.:
                start = (abs(vmin) // cb_tick_interval) \
                    * cb_tick_interval * _np.sign(vmin)
            else:
                start = (vmin // cb_tick_interval + 1) \
                    * cb_tick_interval
            if _np.sign(vmax) == -1.:
                stop = (abs(vmax) // cb_tick_interval + 1) \
                    * cb_tick_interval * _np.sign(vmax)
            else:
                stop = (vmax // cb_tick_interval) * cb_tick_interval
            cb_ticks = _np.linspace(start, stop,
                                    num=int((stop-start)//cb_tick_interval+1),
                                    endpoint=True)
        if cb_minor_tick_interval is not None:
            if _np.sign(vmin) == -1.:
                start = (abs(vmin) // cb_minor_tick_interval) \
                    * cb_minor_tick_interval * _np.sign(vmin)
            else:
                start = (vmin // cb_minor_tick_interval + 1) \
                    * cb_minor_tick_interval
            if _np.sign(vmax) == -1.:
                stop = (abs(vmax) // cb_minor_tick_interval + 1) \
                    * cb_minor_tick_interval * _np.sign(vmax)
            else:
                stop = (vmax // cb_minor_tick_interval) * \
                    cb_minor_tick_interval
            cb_minor_ticks = _np.linspace(
                start, stop, num=int((stop-start)//cb_minor_tick_interval+1),
                endpoint=True)
        # determine which ticks to plot: upper-case W/S/E/N draws ticks and
        # labels on that side, lower-case draws ticks only.
        if 'W' in ticks:
            left, labelleft = True, True
        elif 'w' in ticks:
            left, labelleft = True, False
        else:
            left, labelleft = False, False
        if 'S' in ticks:
            bottom, labelbottom = True, True
        elif 's' in ticks:
            bottom, labelbottom = True, False
        else:
            bottom, labelbottom = False, False
        if 'E' in ticks:
            right, labelright = True, True
        elif 'e' in ticks:
            right, labelright = True, False
        else:
            right, labelright = False, False
        if 'N' in ticks:
            top, labeltop = True, True
        elif 'n' in ticks:
            top, labeltop = True, False
        else:
            top, labeltop = False, False
        # Image extent shifted by half a cell so pixel centers fall on the
        # DH sample coordinates.
        extent = (-360. / self.sampling / self.n / 2.,
                  360. + 360. / self.sampling / self.n * (self.extend - 0.5),
                  -90. - 180. / self.n * (self.extend - 0.5),
                  90. + 180. / 2. / self.n)
        # Add space for annotations between plot and colorbar. This will be
        # False for map projections that do not support longitude labels
        cb_space = True
        # plot image, ticks, and annotations
        if projection is not None:
            axes.set_global()
            cim = axes.imshow(
                self.data, transform=_ccrs.PlateCarree(central_longitude=0.0),
                origin='upper', extent=extent, cmap=cmap_scaled,
                vmin=cmap_limits[0], vmax=cmap_limits[1])
            # Only PlateCarree axes support lat/lon tick labels in Cartopy.
            if isinstance(projection, _ccrs.PlateCarree):
                axes.set_xticks(
                    xticks, crs=_ccrs.PlateCarree(central_longitude=0.0))
                axes.set_yticks(
                    yticks, crs=_ccrs.PlateCarree(central_longitude=0.0))
                axes.set_xticks(minor_xticks, minor=True,
                                crs=_ccrs.PlateCarree(central_longitude=0.0))
                axes.set_yticks(minor_yticks, minor=True,
                                crs=_ccrs.PlateCarree(central_longitude=0.0))
                axes.xaxis.set_major_formatter(_LongitudeFormatter())
                axes.yaxis.set_major_formatter(_LatitudeFormatter())
            else:
                cb_space = False
            if grid:
                # NOTE(review): xticks-180 assumes xticks is an ndarray;
                # with tick_interval[0] None it is a plain list — confirm
                # callers never combine grid=True with disabled x ticks.
                axes.gridlines(xlocs=xticks-180, ylocs=yticks,
                               crs=_ccrs.PlateCarree(central_longitude=0.0))
        else:
            cim = axes.imshow(self.data, origin='upper', extent=extent,
                              cmap=cmap_scaled, vmin=cmap_limits[0],
                              vmax=cmap_limits[1])
            axes.set(xlim=(0, 360), ylim=(-90, 90))
            axes.set_xlabel(xlabel, fontsize=axes_labelsize)
            axes.set_ylabel(ylabel, fontsize=axes_labelsize)
            axes.set(xticks=xticks, yticks=yticks)
            axes.set_xticks(minor_xticks, minor=True)
            axes.set_yticks(minor_yticks, minor=True)
            axes.grid(grid, which='major')
            axes.xaxis.set_major_formatter(
                _mpl.ticker.FormatStrFormatter("%d"+u"\u00B0"))
            axes.yaxis.set_major_formatter(
                _mpl.ticker.FormatStrFormatter("%d"+u"\u00B0"))
        axes.tick_params(bottom=bottom, top=top, right=right, left=left,
                         labelbottom=labelbottom, labeltop=labeltop,
                         labelleft=labelleft, labelright=labelright,
                         which='both')
        axes.tick_params(which='major', labelsize=tick_labelsize)
        if title is not None:
            axes.set_title(title, fontsize=titlesize)
        # plot colorbar
        if colorbar is not None:
            if cb_offset is None:
                # Estimate the gap (in inches) needed between the map and
                # the colorbar from tick/label sizes; values are summed in
                # points and converted at 72 points per inch.
                if colorbar in set(['left', 'right']):
                    offset = 0.15
                    if (colorbar == 'left' and 'W' in ticks) or \
                            (colorbar == 'right' and 'E' in ticks):
                        offset += 2 * tick_labelsize / 72.
                    # add space for ylabel on left of plot only
                    if ylabel != '' and ylabel is not None and \
                            projection is None and colorbar == 'left':
                        offset += 1.4 * axes_labelsize / 72.
                else:
                    offset = 0.
                    # add space for ticks
                    if (colorbar == 'bottom' and bottom and cb_space) or \
                            (colorbar == 'top' and top and cb_space):
                        offset += _mpl.rcParams['xtick.major.size']
                    # add space for labels
                    if (colorbar == 'bottom' and labelbottom and cb_space) or \
                            (colorbar == 'top' and labeltop and cb_space):
                        offset += _mpl.rcParams['xtick.major.pad']
                        offset += tick_labelsize
                    # add space for xlabel on bottom of plot only
                    if xlabel != '' and xlabel is not None and \
                            projection is None and colorbar == 'bottom':
                        offset += axes_labelsize
                    offset += 1.3 * _mpl.rcParams['font.size']  # add extra
                    offset /= 72.  # convert to inches
            else:
                offset = cb_offset / 72.0  # convert to inches
            divider = _make_axes_locatable(axes)
            if colorbar in set(['left', 'right']):
                orientation = 'vertical'
                if cb_width is None:
                    size = '2.5%'
                else:
                    size = '{:f}%'.format(cb_width)
            else:
                orientation = 'horizontal'
                if cb_width is None:
                    size = '5%'
                else:
                    size = '{:f}%'.format(cb_width)
            cax = divider.append_axes(colorbar, size=size, pad=offset,
                                      axes_class=_plt.Axes)
            cbar = _plt.colorbar(cim, cax=cax, orientation=orientation,
                                 extend=cb_triangles)
            if colorbar == 'left':
                cbar.ax.yaxis.set_ticks_position('left')
                cbar.ax.yaxis.set_label_position('left')
            if colorbar == 'top':
                cbar.ax.xaxis.set_ticks_position('top')
                cbar.ax.xaxis.set_label_position('top')
            if cb_label is not None:
                cbar.set_label(cb_label, fontsize=axes_labelsize)
            if cb_ylabel is not None:
                # cb_ylabel is drawn at the end of the colorbar: on top for
                # vertical bars, to the right for horizontal bars.
                if colorbar in set(['left', 'right']):
                    cbar.ax.xaxis.set_label_position('top')
                    cbar.ax.set_xlabel(cb_ylabel, fontsize=tick_labelsize)
                else:
                    cbar.ax.yaxis.set_label_position('right')
                    cbar.ax.set_ylabel(cb_ylabel, fontsize=tick_labelsize,
                                       rotation=0., labelpad=axes_labelsize/2.,
                                       va='center', ha='left')
            if cb_ticks is not None:
                cbar.set_ticks(cb_ticks)
            if cb_minor_ticks is not None:
                if colorbar in set(['top', 'bottom']):
                    cbar.ax.xaxis.set_ticks(cb_minor_ticks, minor=True)
                else:
                    cbar.ax.yaxis.set_ticks(cb_minor_ticks, minor=True)
            cbar.ax.tick_params(labelsize=tick_labelsize)
        if ax is None:
            return fig, axes
    def _plot_pygmt(self, fig=None, projection=None, region=None, width=None,
                    unit=None, central_latitude=None, central_longitude=None,
                    grid=None, tick_interval=None, minor_tick_interval=None,
                    ticks=None, title=None, cmap=None, cmap_limits=None,
                    cmap_limits_complex=None, cmap_reverse=None,
                    cmap_continuous=None, colorbar=None, cb_triangles=None,
                    cb_label=None, cb_ylabel=None, cb_tick_interval=None,
                    cb_minor_tick_interval=None, horizon=None, offset=None,
                    cb_offset=None):
        """
        Plot projected data using pygmt.

        Translates the projection name and layout options into GMT option
        strings, then draws the grid with Figure.grdimage. Returns the
        pygmt Figure (a new one when fig is None).
        """
        # Map the projection name to its single-letter GMT code; only the
        # first three (lowercased) characters of the name are compared.
        center = [central_longitude, central_latitude]
        if projection.lower()[0:3] == 'mollweide'[0:3]:
            proj_str = 'W' + str(center[0])
        elif projection.lower()[0:3] == 'hammer'[0:3]:
            proj_str = 'H' + str(center[0])
        elif projection.lower()[0:3] == 'winkel-tripel'[0:3]:
            proj_str = 'R' + str(center[0])
        elif projection.lower()[0:3] == 'robinson'[0:3]:
            proj_str = 'N' + str(center[0])
        elif projection.lower()[0:3] == 'eckert'[0:3]:
            proj_str = 'K' + str(center[0])
        elif projection.lower()[0:3] == 'sinusoidal'[0:3]:
            proj_str = 'I' + str(center[0])
        elif projection.lower()[0:3] == 'van-der-grinten'[0:3]:
            proj_str = 'V' + str(center[0])
        elif projection.lower()[0:3] == 'lambert-azimuthal-equal-area'[0:3]:
            proj_str = 'A' + str(center[0]) + '/' + str(center[1])
        elif projection.lower()[0:3] == 'stereographic-equal-angle'[0:3]:
            proj_str = 'S' + str(center[0]) + '/' + str(center[1])
        elif projection.lower()[0:3] == 'orthographic'[0:3]:
            proj_str = 'G' + str(center[0]) + '/' + str(center[1])
        elif projection.lower()[0:3] == 'azimuthal-equidistant'[0:3]:
            proj_str = 'E' + str(center[0]) + '/' + str(center[1])
        elif projection.lower()[0:3] == 'gnomonic'[0:3]:
            proj_str = 'F' + str(center[0]) + '/' + str(center[1]) + '/' \
                + str(horizon)
        elif projection.lower()[0:3] == 'miller-cylindrical'[0:3]:
            proj_str = 'J' + str(central_longitude)
        # The three cylindrical projections below are compared WITHOUT
        # .lower(): the case of the first letters ('cyl', 'Cyl', 'CYL')
        # distinguishes equidistant, equal-area, and stereographic.
        elif projection[0:3] == 'cylindrical-equidistant'[0:3]:
            proj_str = 'Q' + str(center[0]) + '/' + str(center[1])
        elif projection[0:3] == 'Cylindrical-equal-area'[0:3]:
            proj_str = 'Y' + str(center[0]) + '/' + str(center[1])
        elif projection[0:3] == 'CYLindrical-stereographic'[0:3]:
            proj_str = 'Cyl_stere' + '/' + str(center[0]) + '/' \
                + str(center[1])
        else:
            raise ValueError('Input projection is not recognized or '
                             'supported. Input projection = {:s}'
                             .format(repr(projection)))
        proj_str += '/' + str(width) + unit
        # Assemble the GMT frame strings: g = gridlines, a = major ticks,
        # f = minor ticks, one string per axis.
        framex = 'x'
        framey = 'y'
        if grid[0] is not None:
            framex += 'g' + str(grid[0])
        if grid[1] is not None:
            framey += 'g' + str(grid[1])
        if tick_interval[0] is not None:
            framex += 'a' + str(tick_interval[0])
        if tick_interval[1] is not None:
            framey += 'a' + str(tick_interval[1])
        if minor_tick_interval[0] is not None:
            framex += 'f' + str(minor_tick_interval[0])
        if minor_tick_interval[1] is not None:
            framey += 'f' + str(minor_tick_interval[1])
        if title is not None:
            ticks += '+t"{:s}"'.format(title)
        frame = [framex, framey, ticks]
        # Colorbar position code (justification + orientation) and its
        # annotation/label strings.
        position = None
        cb_str = None
        if colorbar is not None:
            if colorbar == 'right':
                position = "JMR"
            elif colorbar == 'bottom':
                position = "JBC+h"
            elif colorbar == 'left':
                position = "JML"
            elif colorbar == 'top':
                position = "JTC+h"
            if cb_offset is not None:
                # NOTE(review): colorbar takes values 'left'/'right'/
                # 'top'/'bottom' above, so 'horizontal' never matches and
                # the first branch looks unreachable — confirm intent.
                if colorbar == 'horizontal':
                    position += '+o0p/' + str(cb_offset) + 'p'
                else:
                    position += '+o' + str(cb_offset) + 'p/0p'
            if cb_triangles is not None:
                if cb_triangles == 'neither':
                    pass
                elif cb_triangles == 'both':
                    position += '+ebf'
                elif cb_triangles == 'min':
                    position += '+eb'
                elif cb_triangles == 'max':
                    position += '+ef'
                else:
                    raise ValueError("cb_triangles must be 'neither', 'both' "
                                     "'min' or 'max'. Input value is {:s}."
                                     .format(repr(cb_triangles)))
            cb_str = []
            x_str = 'x'
            if cb_label is not None:
                cb_str.extend(['x+l"{:s}"'.format(cb_label)])
            if cb_tick_interval is not None:
                x_str += 'a' + str(cb_tick_interval)
            if cb_minor_tick_interval is not None:
                x_str += 'f' + str(cb_minor_tick_interval)
            cb_str.extend([x_str])
            if cb_ylabel is not None:
                cb_str.extend(['y+l"{:s}"'.format(cb_ylabel)])
        # Optional origin shifts of the plot, in the given unit.
        if offset[0] is not None:
            xshift = str(offset[0]) + unit
        else:
            xshift = False
        if offset[1] is not None:
            yshift = str(offset[1]) + unit
        else:
            yshift = False
        if fig is None:
            figure = _pygmt.Figure()
        else:
            figure = fig
        if cmap_limits is None:
            cmap_limits = [self.min(), self.max()]
        _pygmt.makecpt(series=cmap_limits, cmap=cmap, reverse=cmap_reverse,
                       continuous=cmap_continuous)
        figure.grdimage(self.to_xarray(), region=region, projection=proj_str,
                        frame=frame, X=xshift, Y=yshift)
        if colorbar is not None:
            figure.colorbar(position=position, frame=cb_str)
        return figure
# ---- Complex Driscoll and Healy grid class ----
class DHComplexGrid(SHGrid):
    """
    Class for complex Driscoll and Healy (1994) grids.
    """
    @staticmethod
    def istype(kind):
        """Return True when kind names this subclass ('complex')."""
        return kind == 'complex'

    @staticmethod
    def isgrid(grid):
        """Return True when grid names this subclass ('DH')."""
        return grid == 'DH'

    def __init__(self, array, copy=True):
        """Initialize from a 2D complex array sampled on a DH grid.

        The array must have nlon equal to nlat, 2*nlat, or 2*nlat-1;
        otherwise a ValueError is raised. When copy is True the input
        data are duplicated, otherwise the array is referenced directly.
        """
        self.nlat, self.nlon = array.shape
        # An odd number of latitude bands means both poles are included
        # (the 'extended' layout); n is the even DH sampling parameter.
        self.extend = self.nlat % 2 != 0
        self.n = self.nlat - 1 if self.extend else self.nlat
        # extend participates in the arithmetic as 0/1.
        if self.nlon == 2 * self.nlat - self.extend:
            self.sampling = 2
        elif self.nlon == self.nlat:
            self.sampling = 1
        else:
            raise ValueError('Input array has shape (nlat={:d}, nlon={:d}) '
                             .format(self.nlat, self.nlon) +
                             'but needs nlon=nlat, nlon=2*nlat, or '
                             'nlon=2*nlat-1.'
                             )
        self.lmax = int(self.n / 2 - 1)
        self.grid = 'DH'
        self.kind = 'complex'
        self.data = _np.copy(array) if copy else array
def _lats(self):
"""
Return a vector containing the latitudes (in degrees) of each row
of the gridded data.
"""
if self.extend:
lats = _np.linspace(90.0, -90.0, num=self.nlat)
else:
lats = _np.linspace(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)
return lats
def _lons(self):
"""
Return a vector containing the longitudes (in degrees) of each row
of the gridded data.
"""
if self.extend:
lons = _np.linspace(0.0, 360.0, num=self.nlon)
else:
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons
def _expand(self, normalization, csphase, **kwargs):
"""Expand the grid into real spherical harmonics."""
if normalization.lower() == '4pi':
norm = 1
elif normalization.lower() == 'schmidt':
norm = 2
elif normalization.lower() == 'schmidt':
norm = 3
elif normalization.lower() == 'ortho':
norm = 4
else:
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value is {:s}."
.format(repr(normalization))
)
cilm = _shtools.SHExpandDHC(self.data[:self.nlat-self.extend,
:self.nlon-self.extend],
norm=norm, csphase=csphase, **kwargs)
coeffs = SHCoeffs.from_array(cilm, normalization=normalization.lower(),
csphase=csphase, copy=False)
return coeffs
    def _plot(self, projection=None, xlabel=None, ylabel=None, colorbar=None,
              cb_triangles=None, cb_label=None, grid=False, ticks=None,
              axes_labelsize=None, tick_labelsize=None, title=None,
              titlesize=None, cmap=None, ax=None, ax2=None,
              tick_interval=None, minor_tick_interval=None, cb_ylabel=None,
              cb_tick_interval=None, cb_minor_tick_interval=None,
              cmap_limits=None, cmap_reverse=None, cmap_limits_complex=None,
              cb_offset=None, cb_width=None):
        """Plot the raw data as a matplotlib simple cylindrical projection,
        or with Cartopy when projection is specified.

        Draws two panels — the real part (into ax, title[0]) and the
        imaginary part (into ax2, title[1]) — by delegating to the plot
        method of the real grids returned by to_real() and to_imag().
        Returns (fig, axes) when ax is None; otherwise returns None.
        """
        if ax is None:
            # Taller figure than the real-grid case: two stacked panels.
            if colorbar is not None:
                if colorbar in set(['top', 'bottom']):
                    scale = 1.5
                else:
                    scale = 1.1
            else:
                scale = 1.2
            figsize = (_mpl.rcParams['figure.figsize'][0],
                       _mpl.rcParams['figure.figsize'][0]*scale)
            fig, axes = _plt.subplots(2, 1, figsize=figsize)
            axreal = axes.flat[0]
            axcomplex = axes.flat[1]
        else:
            axreal = ax
            axcomplex = ax2
        # Real part uses cmap_limits; imaginary part uses
        # cmap_limits_complex.
        self.to_real().plot(projection=projection, tick_interval=tick_interval,
                            minor_tick_interval=minor_tick_interval,
                            colorbar=colorbar, cb_triangles=cb_triangles,
                            cb_label=cb_label, ticks=ticks,
                            cb_tick_interval=cb_tick_interval,
                            cb_minor_tick_interval=cb_minor_tick_interval,
                            grid=grid, axes_labelsize=axes_labelsize,
                            tick_labelsize=tick_labelsize, cb_offset=cb_offset,
                            title=title[0], titlesize=titlesize,
                            xlabel=xlabel, ylabel=ylabel, cb_ylabel=cb_ylabel,
                            cb_width=cb_width, cmap=cmap,
                            cmap_limits=cmap_limits, cmap_reverse=cmap_reverse,
                            ax=axreal)
        self.to_imag().plot(projection=projection, tick_interval=tick_interval,
                            minor_tick_interval=minor_tick_interval,
                            colorbar=colorbar, cb_triangles=cb_triangles,
                            cb_label=cb_label, ticks=ticks,
                            cb_tick_interval=cb_tick_interval,
                            cb_minor_tick_interval=cb_minor_tick_interval,
                            grid=grid, axes_labelsize=axes_labelsize,
                            tick_labelsize=tick_labelsize, cb_ylabel=cb_ylabel,
                            title=title[1], titlesize=titlesize,
                            cmap=cmap, cmap_limits=cmap_limits_complex,
                            cmap_reverse=cmap_reverse, cb_offset=cb_offset,
                            cb_width=cb_width, xlabel=xlabel, ylabel=ylabel,
                            ax=axcomplex)
        if ax is None:
            return fig, axes
    def _plot_pygmt(self, fig=None, projection=None, region=None, width=None,
                    unit=None, central_latitude=None, central_longitude=None,
                    grid=None, tick_interval=None, minor_tick_interval=None,
                    ticks=None, title=None, cmap=None, cmap_limits=None,
                    cmap_limits_complex=None, cmap_reverse=None,
                    cmap_continuous=None, colorbar=None, cb_triangles=None,
                    cb_label=None, cb_ylabel=None, cb_tick_interval=None,
                    cb_minor_tick_interval=None, horizon=None, offset=None,
                    cb_offset=None):
        """
        Plot projected data using pygmt.

        Draws the imaginary part first (title[1], cmap_limits_complex),
        then the real part (title[0], cmap_limits) shifted upward so the
        two maps stack in one figure. Returns the pygmt Figure.
        """
        if fig is None:
            figure = _pygmt.Figure()
        else:
            figure = fig
        self.to_imag().plotgmt(fig=figure, projection=projection,
                               region=region, width=width, unit=unit,
                               central_latitude=central_latitude,
                               central_longitude=central_longitude,
                               grid=grid, tick_interval=tick_interval,
                               minor_tick_interval=minor_tick_interval,
                               ticks=ticks, title=title[1], cmap=cmap,
                               cmap_limits=cmap_limits_complex,
                               cmap_reverse=cmap_reverse, cb_offset=cb_offset,
                               cmap_continuous=cmap_continuous,
                               colorbar=colorbar, cb_triangles=cb_triangles,
                               cb_label=cb_label, cb_ylabel=cb_ylabel,
                               cb_tick_interval=cb_tick_interval,
                               cb_minor_tick_interval=cb_minor_tick_interval,
                               horizon=horizon, offset=offset)
        # Shift the real-part panel up by half the map width plus a fixed
        # 50-point margin (converted to inches at 72 pt/in).
        offset_real = _np.copy(offset)
        if offset_real[1] is None:
            offset_real[1] = width / 2. + 50. / 72.
        else:
            offset_real[1] += width / 2. + 50. / 72.
        self.to_real().plotgmt(fig=figure, projection=projection,
                               region=region, width=width, unit=unit,
                               central_latitude=central_latitude,
                               central_longitude=central_longitude,
                               grid=grid, tick_interval=tick_interval,
                               minor_tick_interval=minor_tick_interval,
                               ticks=ticks, title=title[0], cmap=cmap,
                               cmap_limits=cmap_limits, cb_offset=cb_offset,
                               cmap_reverse=cmap_reverse,
                               cmap_continuous=cmap_continuous,
                               colorbar=colorbar, cb_triangles=cb_triangles,
                               cb_label=cb_label, cb_ylabel=cb_ylabel,
                               cb_tick_interval=cb_tick_interval,
                               cb_minor_tick_interval=cb_minor_tick_interval,
                               horizon=horizon, offset=offset_real)
        return figure
# ---- Real Gauss-Legendre Quadrature grid class ----
class GLQRealGrid(SHGrid):
    """
    Class for real Gauss-Legendre Quadrature grids.
    """
    @staticmethod
    def istype(kind):
        """Return True when kind names this subclass ('real')."""
        return kind == 'real'

    @staticmethod
    def isgrid(grid):
        """Return True when grid names this subclass ('GLQ')."""
        return grid == 'GLQ'

    def __init__(self, array, zeros=None, weights=None, copy=True):
        """Initialize from a 2D real array sampled on a GLQ grid.

        nlon must equal 2*lmax+1 (or 2*lmax+2 for the extended layout),
        where lmax = nlat - 1; otherwise a ValueError is raised. The
        quadrature zeros and weights are computed with SHGLQ unless both
        are supplied.
        """
        self.nlat, self.nlon = array.shape
        self.lmax = self.nlat - 1
        if self.nlon not in (2 * self.lmax + 1, 2 * self.lmax + 2):
            raise ValueError('Input array has shape (nlat={:d}, nlon={:d}) '
                             .format(self.nlat, self.nlon) +
                             'but needs ({:d}, {:d}) or ({:d}, {:d}).'
                             .format(self.lmax+1, 2*self.lmax+1, self.lmax+1,
                                     2*self.lmax+2)
                             )
        # The extended layout carries one redundant longitude column.
        self.extend = self.nlon == 2 * self.lmax + 2
        if zeros is None or weights is None:
            self.zeros, self.weights = _shtools.SHGLQ(self.lmax)
        else:
            self.zeros = zeros
            self.weights = weights
        self.grid = 'GLQ'
        self.kind = 'real'
        self.data = _np.copy(array) if copy else array
def _lats(self):
"""
Return a vector containing the latitudes (in degrees) of each row
of the gridded data.
"""
lats = 90. - _np.arccos(self.zeros) * 180. / _np.pi
return lats
def _lons(self):
"""
Return a vector containing the longitudes (in degrees) of each column
of the gridded data.
"""
if self.extend:
lons = _np.linspace(0.0, 360.0, num=self.nlon)
else:
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons
def _expand(self, normalization, csphase, **kwargs):
"""Expand the grid into real spherical harmonics."""
if normalization.lower() == '4pi':
norm = 1
elif normalization.lower() == 'schmidt':
norm = 2
elif normalization.lower() == 'unnorm':
norm = 3
elif normalization.lower() == 'ortho':
norm = 4
else:
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt' " +
"or 'unnorm'. Input value is {:s}."
.format(repr(normalization))
)
cilm = _shtools.SHExpandGLQ(self.data[:, :self.nlon-self.extend],
self.weights, self.zeros, norm=norm,
csphase=csphase, **kwargs)
coeffs = SHCoeffs.from_array(cilm, normalization=normalization.lower(),
csphase=csphase, copy=False)
return coeffs
    def _plot(self, projection=None, xlabel=None, ylabel=None, ax=None,
              ax2=None, colorbar=None, cb_triangles=None, cb_label=None,
              grid=False, axes_labelsize=None, tick_labelsize=None,
              title=None, titlesize=None, cmap=None, tick_interval=None,
              minor_tick_interval=None, cb_tick_interval=None, ticks=None,
              cb_minor_tick_interval=None, cmap_limits=None, cmap_reverse=None,
              cmap_limits_complex=None, cb_ylabel=None, cb_offset=None,
              cb_width=None):
        """Plot the data using a matplotlib cylindrical projection.

        Unlike the DH grids, axes are labeled in grid indices rather than
        degrees (GLQ latitudes are unevenly spaced). Returns (fig, axes)
        when ax is None; otherwise draws into ax and returns None.
        """
        if ax is None:
            if colorbar is not None:
                if colorbar in set(['top', 'bottom']):
                    scale = 0.67
                else:
                    scale = 0.5
            else:
                scale = 0.55
            figsize = (_mpl.rcParams['figure.figsize'][0],
                       _mpl.rcParams['figure.figsize'][0] * scale)
            fig, axes = _plt.subplots(1, 1, figsize=figsize)
        else:
            axes = ax
        # Ticks are placed at grid indices, not geographic coordinates.
        if tick_interval[0] is None:
            xticks = []
        else:
            xticks = _np.arange(0, self.nlon, tick_interval[0])
        if tick_interval[1] is None:
            yticks = []
        else:
            yticks = _np.arange(0, self.nlat, tick_interval[1])
        if minor_tick_interval[0] is None:
            minor_xticks = []
        else:
            minor_xticks = _np.arange(0, self.nlon, minor_tick_interval[0])
        if minor_tick_interval[1] is None:
            minor_yticks = []
        else:
            minor_yticks = _np.arange(0, self.nlat, minor_tick_interval[1])
        # make colormap
        if cmap_limits is None:
            cmap_limits = [self.min(), self.max()]
        if len(cmap_limits) == 3:
            # Three-element limits: [vmin, vmax, interval] selects a
            # discretized colormap with one color per interval.
            num = int((cmap_limits[1] - cmap_limits[0]) / cmap_limits[2])
            if isinstance(cmap, _mpl.colors.Colormap):
                # NOTE(review): _resample is a private matplotlib API
                # (renamed to resampled in newer releases) — confirm the
                # supported matplotlib versions.
                cmap_scaled = cmap._resample(num)
            else:
                cmap_scaled = _mpl.cm.get_cmap(cmap, num)
        else:
            cmap_scaled = _mpl.cm.get_cmap(cmap)
        if cmap_reverse:
            cmap_scaled = cmap_scaled.reversed()
        # compute colorbar ticks
        cb_ticks = None
        cb_minor_ticks = None
        vmin = cmap_limits[0]
        vmax = cmap_limits[1]
        # Start/stop are rounded inward so every tick lies within
        # [vmin, vmax]; the sign handling keeps floor division correct
        # for negative bounds.
        if cb_tick_interval is not None:
            if _np.sign(vmin) == -1.:
                start = (abs(vmin) // cb_tick_interval) \
                    * cb_tick_interval * _np.sign(vmin)
            else:
                start = (vmin // cb_tick_interval + 1) \
                    * cb_tick_interval
            if _np.sign(vmax) == -1.:
                stop = (abs(vmax) // cb_tick_interval + 1) \
                    * cb_tick_interval * _np.sign(vmax)
            else:
                stop = (vmax // cb_tick_interval) * cb_tick_interval
            cb_ticks = _np.linspace(start, stop,
                                    num=int((stop-start)//cb_tick_interval+1),
                                    endpoint=True)
        if cb_minor_tick_interval is not None:
            if _np.sign(vmin) == -1.:
                start = (abs(vmin) // cb_minor_tick_interval) \
                    * cb_minor_tick_interval * _np.sign(vmin)
            else:
                start = (vmin // cb_minor_tick_interval + 1) \
                    * cb_minor_tick_interval
            if _np.sign(vmax) == -1.:
                stop = (abs(vmax) // cb_minor_tick_interval + 1) \
                    * cb_minor_tick_interval * _np.sign(vmax)
            else:
                stop = (vmax // cb_minor_tick_interval) * \
                    cb_minor_tick_interval
            cb_minor_ticks = _np.linspace(
                start, stop, num=int((stop-start)//cb_minor_tick_interval+1),
                endpoint=True)
        # determine which ticks to plot: upper-case W/S/E/N draws ticks and
        # labels on that side, lower-case draws ticks only.
        if 'W' in ticks:
            left, labelleft = True, True
        elif 'w' in ticks:
            left, labelleft = True, False
        else:
            left, labelleft = False, False
        if 'S' in ticks:
            bottom, labelbottom = True, True
        elif 's' in ticks:
            bottom, labelbottom = True, False
        else:
            bottom, labelbottom = False, False
        if 'E' in ticks:
            right, labelright = True, True
        elif 'e' in ticks:
            right, labelright = True, False
        else:
            right, labelright = False, False
        if 'N' in ticks:
            top, labeltop = True, True
        elif 'n' in ticks:
            top, labeltop = True, False
        else:
            top, labeltop = False, False
        # plot image, ticks, and annotations
        extent = (-0.5, self.nlon-0.5, -0.5, self.nlat-0.5)
        cim = axes.imshow(self.data, extent=extent, origin='upper',
                          cmap=cmap_scaled, vmin=cmap_limits[0],
                          vmax=cmap_limits[1])
        axes.set(xticks=xticks, yticks=yticks)
        axes.set_xlabel(xlabel, fontsize=axes_labelsize)
        axes.set_ylabel(ylabel, fontsize=axes_labelsize)
        axes.set_xticklabels(xticks, fontsize=tick_labelsize)
        axes.set_yticklabels(yticks, fontsize=tick_labelsize)
        axes.set_xticks(minor_xticks, minor=True)
        axes.set_yticks(minor_yticks, minor=True)
        axes.tick_params(bottom=bottom, top=top, right=right, left=left,
                         labelbottom=labelbottom, labeltop=labeltop,
                         labelleft=labelleft, labelright=labelright,
                         which='both')
        axes.grid(grid, which='major')
        if title is not None:
            axes.set_title(title, fontsize=titlesize)
        # plot colorbar
        if colorbar is not None:
            if cb_offset is None:
                # Estimate the gap (in inches) needed between the map and
                # the colorbar from tick/label sizes; values are summed in
                # points and converted at 72 points per inch.
                if colorbar in set(['left', 'right']):
                    offset = 0.15
                    if (colorbar == 'left' and 'W' in ticks) or \
                            (colorbar == 'right' and 'E' in ticks):
                        offset += 2 * tick_labelsize / 72.
                    # add space for ylabel on left of plot only
                    if ylabel != '' and ylabel is not None and \
                            colorbar == 'left':
                        offset += 1.4 * axes_labelsize / 72.
                else:
                    offset = 0.
                    # add space for ticks
                    if (colorbar == 'bottom' and bottom) or \
                            (colorbar == 'top' and top):
                        offset += _mpl.rcParams['xtick.major.size']
                    # add space for labels
                    if (colorbar == 'bottom' and labelbottom) or \
                            (colorbar == 'top' and labeltop):
                        offset += _mpl.rcParams['xtick.major.pad']
                        offset += tick_labelsize
                    # add space for xlabel on bottom of plot only
                    if xlabel != '' and xlabel is not None and \
                            colorbar == 'bottom':
                        offset += axes_labelsize
                    offset += 1.3 * _mpl.rcParams['font.size']  # add extra
                    offset /= 72.  # convert to inches
            else:
                offset = cb_offset / 72.  # convert to inches
            divider = _make_axes_locatable(axes)
            if colorbar in set(['left', 'right']):
                orientation = 'vertical'
                if cb_width is None:
                    size = '2.5%'
                else:
                    size = '{:f}%'.format(cb_width)
            else:
                orientation = 'horizontal'
                if cb_width is None:
                    size = '5%'
                else:
                    size = '{:f}%'.format(cb_width)
            cax = divider.append_axes(colorbar, size=size, pad=offset,
                                      axes_class=_plt.Axes)
            cbar = _plt.colorbar(cim, cax=cax, orientation=orientation,
                                 extend=cb_triangles)
            if colorbar == 'left':
                cbar.ax.yaxis.set_ticks_position('left')
                cbar.ax.yaxis.set_label_position('left')
            if colorbar == 'top':
                cbar.ax.xaxis.set_ticks_position('top')
                cbar.ax.xaxis.set_label_position('top')
            if cb_label is not None:
                cbar.set_label(cb_label, fontsize=axes_labelsize)
            if cb_ylabel is not None:
                # cb_ylabel is drawn at the end of the colorbar: on top for
                # vertical bars, to the right for horizontal bars.
                if colorbar in set(['left', 'right']):
                    cbar.ax.xaxis.set_label_position('top')
                    cbar.ax.set_xlabel(cb_ylabel, fontsize=tick_labelsize)
                else:
                    cbar.ax.yaxis.set_label_position('right')
                    cbar.ax.set_ylabel(cb_ylabel, fontsize=tick_labelsize,
                                       rotation=0., labelpad=axes_labelsize/2.,
                                       va='center', ha='left')
            if cb_ticks is not None:
                cbar.set_ticks(cb_ticks)
            if cb_minor_ticks is not None:
                if colorbar in set(['top', 'bottom']):
                    cbar.ax.xaxis.set_ticks(cb_minor_ticks, minor=True)
                else:
                    cbar.ax.yaxis.set_ticks(cb_minor_ticks, minor=True)
            cbar.ax.tick_params(labelsize=tick_labelsize)
        if ax is None:
            return fig, axes
def _plot_pygmt(self, **kwargs):
"""
Plot the projected data using pygmt.
"""
raise NotImplementedError('plotgmt() does not support the plotting '
'of GLQ gridded data.')
# ---- Complex Gauss-Legendre Quadrature grid class ----
class GLQComplexGrid(SHGrid):
    """
    Class for complex Gauss-Legendre Quadrature grids.
    """
    @staticmethod
    def istype(kind):
        """Return True when kind names this subclass ('complex')."""
        return kind == 'complex'

    @staticmethod
    def isgrid(grid):
        """Return True when grid names this subclass ('GLQ')."""
        return grid == 'GLQ'

    def __init__(self, array, zeros=None, weights=None, copy=True):
        """Initialize from a 2D complex array sampled on a GLQ grid.

        nlon must equal 2*lmax+1 (or 2*lmax+2 for the extended layout),
        where lmax = nlat - 1; otherwise a ValueError is raised. The
        quadrature zeros and weights are computed with SHGLQ unless both
        are supplied.
        """
        self.nlat, self.nlon = array.shape
        self.lmax = self.nlat - 1
        if self.nlon not in (2 * self.lmax + 1, 2 * self.lmax + 2):
            raise ValueError('Input array has shape (nlat={:d}, nlon={:d}) '
                             .format(self.nlat, self.nlon) +
                             'but needs ({:d}, {:d}) or ({:d}, {:d}).'
                             .format(self.lmax+1, 2*self.lmax+1, self.lmax+1,
                                     2*self.lmax+2)
                             )
        # The extended layout carries one redundant longitude column.
        self.extend = self.nlon == 2 * self.lmax + 2
        if zeros is None or weights is None:
            self.zeros, self.weights = _shtools.SHGLQ(self.lmax)
        else:
            self.zeros = zeros
            self.weights = weights
        self.grid = 'GLQ'
        self.kind = 'complex'
        self.data = _np.copy(array) if copy else array
def _lats(self):
"""Return the latitudes (in degrees) of the gridded data rows."""
lats = 90. - _np.arccos(self.zeros) * 180. / _np.pi
return lats
def _lons(self):
"""Return the longitudes (in degrees) of the gridded data columns."""
if self.extend:
lons = _np.linspace(0.0, 360.0, num=self.nlon)
else:
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons
def _expand(self, normalization, csphase, **kwargs):
"""Expand the grid into real spherical harmonics."""
if normalization.lower() == '4pi':
norm = 1
elif normalization.lower() == 'schmidt':
norm = 2
elif normalization.lower() == 'unnorm':
norm = 3
elif normalization.lower() == 'ortho':
norm = 4
else:
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt' " +
"or 'unnorm'. Input value is {:s}."
.format(repr(normalization))
)
cilm = _shtools.SHExpandGLQC(self.data[:, :self.nlon-self.extend],
self.weights, self.zeros, norm=norm,
csphase=csphase, **kwargs)
coeffs = SHCoeffs.from_array(cilm, normalization=normalization.lower(),
csphase=csphase, copy=False)
return coeffs
    def _plot(self, projection=None, minor_xticks=[], minor_yticks=[],
              xlabel=None, ylabel=None, ax=None, ax2=None, colorbar=None,
              cb_triangles=None, cb_label=None, grid=False, ticks=None,
              axes_labelsize=None, tick_labelsize=None, title=None,
              titlesize=None, cmap=None, tick_interval=None, cb_ylabel=None,
              minor_tick_interval=None, cb_tick_interval=None,
              cb_minor_tick_interval=None, cmap_limits=None, cmap_reverse=None,
              cmap_limits_complex=None, cb_offset=None, cb_width=None):
        """Plot the raw data using a simply cylindrical projection.

        Draws two panels — the real part (into ax, title[0]) and the
        imaginary part (into ax2, title[1]) — by delegating to the plot
        method of the real grids returned by to_real() and to_imag().
        Returns (fig, axes) when ax is None; otherwise returns None.
        """
        if ax is None:
            # Taller figure than the real-grid case: two stacked panels.
            if colorbar is not None:
                if colorbar in set(['top', 'bottom']):
                    scale = 1.5
                else:
                    scale = 1.1
            else:
                scale = 1.2
            figsize = (_mpl.rcParams['figure.figsize'][0],
                       _mpl.rcParams['figure.figsize'][0]*scale)
            fig, axes = _plt.subplots(2, 1, figsize=figsize)
            axreal = axes.flat[0]
            axcomplex = axes.flat[1]
        else:
            axreal = ax
            axcomplex = ax2
        # Real part uses cmap_limits; imaginary part uses
        # cmap_limits_complex.
        self.to_real().plot(projection=projection, tick_interval=tick_interval,
                            minor_tick_interval=minor_tick_interval,
                            colorbar=colorbar, cb_triangles=cb_triangles,
                            cb_label=cb_label, ticks=ticks,
                            cb_tick_interval=cb_tick_interval,
                            cb_minor_tick_interval=cb_minor_tick_interval,
                            grid=grid, axes_labelsize=axes_labelsize,
                            tick_labelsize=tick_labelsize, cb_offset=cb_offset,
                            title=title[0], titlesize=titlesize,
                            xlabel=xlabel, ylabel=ylabel, cb_ylabel=cb_ylabel,
                            cb_width=cb_width, cmap=cmap,
                            cmap_limits=cmap_limits, cmap_reverse=cmap_reverse,
                            ax=axreal)
        self.to_imag().plot(projection=projection, tick_interval=tick_interval,
                            minor_tick_interval=minor_tick_interval,
                            colorbar=colorbar, cb_triangles=cb_triangles,
                            cb_label=cb_label, ticks=ticks,
                            cb_tick_interval=cb_tick_interval,
                            cb_minor_tick_interval=cb_minor_tick_interval,
                            grid=grid, axes_labelsize=axes_labelsize,
                            tick_labelsize=tick_labelsize, cb_offset=cb_offset,
                            title=title[1], titlesize=titlesize,
                            cmap=cmap, cmap_limits=cmap_limits_complex,
                            cmap_reverse=cmap_reverse, cb_ylabel=cb_ylabel,
                            cb_width=cb_width, xlabel=xlabel, ylabel=ylabel,
                            ax=axcomplex)
        if ax is None:
            return fig, axes
def _plot_pygmt(self, **kwargs):
"""
Plot the projected data using pygmt.
"""
raise NotImplementedError('plotgmt() does not support the plotting '
'of complex GLQ gridded data.')
| [
"numpy.load",
"numpy.triu",
"numpy.random.seed",
"numpy.abs",
"matplotlib.cm.get_cmap",
"cartopy.mpl.ticker.LongitudeFormatter",
"numpy.empty",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"matplotlib.colors.LogNorm",
"numpy.mean",
"numpy.random.normal",
"matplotlib.pyplot.Norm... | [((9883, 9907), 'numpy.iscomplexobj', '_np.iscomplexobj', (['coeffs'], {}), '(coeffs)\n', (9899, 9907), True, 'import numpy as _np\n'), ((15106, 15120), 'numpy.arange', '_np.arange', (['nl'], {}), '(nl)\n', (15116, 15120), True, 'import numpy as _np\n'), ((22676, 22700), 'numpy.iscomplexobj', '_np.iscomplexobj', (['coeffs'], {}), '(coeffs)\n', (22692, 22700), True, 'import numpy as _np\n'), ((26365, 26399), 'numpy.zeros', '_np.zeros', (['(2, lmax + 1, lmax + 1)'], {}), '((2, lmax + 1, lmax + 1))\n', (26374, 26399), True, 'import numpy as _np\n'), ((28574, 28600), 'xarray.open_dataset', '_xr.open_dataset', (['filename'], {}), '(filename)\n', (28590, 28600), True, 'import xarray as _xr\n'), ((29560, 29584), 'numpy.tril', '_np.tril', (['ds.coeffs.data'], {}), '(ds.coeffs.data)\n', (29568, 29584), True, 'import numpy as _np\n'), ((29597, 29626), 'numpy.triu', '_np.triu', (['ds.coeffs.data'], {'k': '(1)'}), '(ds.coeffs.data, k=1)\n', (29605, 29626), True, 'import numpy as _np\n'), ((29639, 29666), 'numpy.vstack', '_np.vstack', (['[s[-1], s[:-1]]'], {}), '([s[-1], s[:-1]])\n', (29649, 29666), True, 'import numpy as _np\n'), ((29679, 29695), 'numpy.transpose', '_np.transpose', (['s'], {}), '(s)\n', (29692, 29695), True, 'import numpy as _np\n'), ((30355, 30372), 'numpy.array', '_np.array', (['[c, s]'], {}), '([c, s])\n', (30364, 30372), True, 'import numpy as _np\n'), ((30385, 30409), 'numpy.iscomplexobj', '_np.iscomplexobj', (['coeffs'], {}), '(coeffs)\n', (30401, 30409), True, 'import numpy as _np\n'), ((31779, 31796), 'numpy.array', '_np.array', (['values'], {}), '(values)\n', (31788, 31796), True, 'import numpy as _np\n'), ((31810, 31823), 'numpy.array', '_np.array', (['ls'], {}), '(ls)\n', (31819, 31823), True, 'import numpy as _np\n'), ((31837, 31850), 'numpy.array', '_np.array', (['ms'], {}), '(ms)\n', (31846, 31850), True, 'import numpy as _np\n'), ((34754, 34767), 'xarray.Dataset', '_xr.Dataset', ([], {}), '()\n', (34765, 34767), 
True, 'import xarray as _xr\n'), ((35037, 35088), 'numpy.transpose', '_np.transpose', (['self.coeffs[1, :lmax + 1, :lmax + 1]'], {}), '(self.coeffs[1, :lmax + 1, :lmax + 1])\n', (35050, 35088), True, 'import numpy as _np\n'), ((35097, 35122), 'numpy.vstack', '_np.vstack', (['[s[1:], s[0]]'], {}), '([s[1:], s[0]])\n', (35107, 35122), True, 'import numpy as _np\n'), ((37657, 37677), 'copy.deepcopy', '_copy.deepcopy', (['self'], {}), '(self)\n', (37671, 37677), True, 'import copy as _copy\n'), ((48207, 48232), 'numpy.arange', '_np.arange', (['(self.lmax + 1)'], {}), '(self.lmax + 1)\n', (48217, 48232), True, 'import numpy as _np\n'), ((75257, 75277), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (75267, 75277), True, 'import numpy as _np\n'), ((82422, 82442), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (82432, 82442), True, 'import numpy as _np\n'), ((88273, 88293), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (88283, 88293), True, 'import numpy as _np\n'), ((88379, 88414), 'numpy.empty', '_np.empty', (['(lmax + 1, 2 * lmax + 1)'], {}), '((lmax + 1, 2 * lmax + 1))\n', (88388, 88414), True, 'import numpy as _np\n'), ((90500, 90531), 'numpy.ma.masked_invalid', '_np.ma.masked_invalid', (['spectrum'], {}), '(spectrum)\n', (90521, 90531), True, 'import numpy as _np\n'), ((90709, 90753), 'numpy.arange', '_np.arange', (['(-lmax)', '(lmax + 2)'], {'dtype': '_np.float'}), '(-lmax, lmax + 2, dtype=_np.float)\n', (90719, 90753), True, 'import numpy as _np\n'), ((90777, 90812), 'numpy.meshgrid', '_np.meshgrid', (['ls', 'ms'], {'indexing': '"""ij"""'}), "(ls, ms, indexing='ij')\n", (90789, 90812), True, 'import numpy as _np\n'), ((92685, 92712), 'matplotlib.pyplot.colorbar', '_plt.colorbar', (['cmesh'], {'ax': 'ax'}), '(cmesh, ax=ax)\n', (92698, 92712), True, 'import matplotlib.pyplot as _plt\n'), ((97665, 97685), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (97675, 97685), True, 'import 
numpy as _np\n'), ((97934, 97969), 'numpy.empty', '_np.empty', (['(lmax + 1, 2 * lmax + 1)'], {}), '((lmax + 1, 2 * lmax + 1))\n', (97943, 97969), True, 'import numpy as _np\n'), ((100091, 100122), 'numpy.ma.masked_invalid', '_np.ma.masked_invalid', (['spectrum'], {}), '(spectrum)\n', (100112, 100122), True, 'import numpy as _np\n'), ((100300, 100344), 'numpy.arange', '_np.arange', (['(-lmax)', '(lmax + 2)'], {'dtype': '_np.float'}), '(-lmax, lmax + 2, dtype=_np.float)\n', (100310, 100344), True, 'import numpy as _np\n'), ((100368, 100403), 'numpy.meshgrid', '_np.meshgrid', (['ls', 'ms'], {'indexing': '"""ij"""'}), "(ls, ms, indexing='ij')\n", (100380, 100403), True, 'import numpy as _np\n'), ((102276, 102303), 'matplotlib.pyplot.colorbar', '_plt.colorbar', (['cmesh'], {'ax': 'ax'}), '(cmesh, ax=ax)\n', (102289, 102303), True, 'import matplotlib.pyplot as _plt\n'), ((103711, 103761), 'numpy.zeros', '_np.zeros', (['(2, lmax + 1, lmax + 1)'], {'dtype': '_np.bool'}), '((2, lmax + 1, lmax + 1), dtype=_np.bool)\n', (103720, 103761), True, 'import numpy as _np\n'), ((103808, 103828), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (103818, 103828), True, 'import numpy as _np\n'), ((104573, 104634), 'numpy.zeros', '_np.zeros', (['(2, self.lmax + 1, self.lmax + 1)'], {'dtype': '"""complex"""'}), "((2, self.lmax + 1, self.lmax + 1), dtype='complex')\n", (104582, 104634), True, 'import numpy as _np\n'), ((111258, 111308), 'numpy.zeros', '_np.zeros', (['(2, lmax + 1, lmax + 1)'], {'dtype': '_np.bool'}), '((2, lmax + 1, lmax + 1), dtype=_np.bool)\n', (111267, 111308), True, 'import numpy as _np\n'), ((111355, 111375), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (111365, 111375), True, 'import numpy as _np\n'), ((113726, 113770), 'numpy.zeros', '_np.zeros', (['(2, self.lmax + 1, self.lmax + 1)'], {}), '((2, self.lmax + 1, self.lmax + 1))\n', (113735, 113770), True, 'import numpy as _np\n'), ((125265, 125288), 'numpy.iscomplexobj', 
'_np.iscomplexobj', (['array'], {}), '(array)\n', (125281, 125288), True, 'import numpy as _np\n'), ((130678, 130691), 'numpy.sin', '_np.sin', (['clat'], {}), '(clat)\n', (130685, 130691), True, 'import numpy as _np\n'), ((130710, 130723), 'numpy.cos', '_np.cos', (['lons'], {}), '(lons)\n', (130717, 130723), True, 'import numpy as _np\n'), ((130741, 130754), 'numpy.sin', '_np.sin', (['lons'], {}), '(lons)\n', (130748, 130754), True, 'import numpy as _np\n'), ((134239, 134265), 'xarray.open_dataarray', '_xr.open_dataarray', (['netcdf'], {}), '(netcdf)\n', (134257, 134265), True, 'import xarray as _xr\n'), ((134490, 134510), 'copy.deepcopy', '_copy.deepcopy', (['self'], {}), '(self)\n', (134504, 134510), True, 'import copy as _copy\n'), ((139677, 139716), 'xarray.Dataset', '_xr.Dataset', (['{name: _data}'], {'attrs': 'attrs'}), '({name: _data}, attrs=attrs)\n', (139688, 139716), True, 'import xarray as _xr\n'), ((140142, 140161), 'numpy.copy', '_np.copy', (['self.data'], {}), '(self.data)\n', (140150, 140161), True, 'import numpy as _np\n'), ((141119, 141137), 'numpy.min', '_np.min', (['self.data'], {}), '(self.data)\n', (141126, 141137), True, 'import numpy as _np\n'), ((141307, 141325), 'numpy.max', '_np.max', (['self.data'], {}), '(self.data)\n', (141314, 141325), True, 'import numpy as _np\n'), ((150158, 150177), 'matplotlib.pyplot.get_cmap', '_plt.get_cmap', (['cmap'], {}), '(cmap)\n', (150171, 150177), True, 'import matplotlib.pyplot as _plt\n'), ((150782, 150809), 'numpy.append', '_np.append', (['lons', '[lons[0]]'], {}), '(lons, [lons[0]])\n', (150792, 150809), True, 'import numpy as _np\n'), ((151007, 151033), 'numpy.radians', '_np.radians', (['lons_circular'], {}), '(lons_circular)\n', (151018, 151033), True, 'import numpy as _np\n'), ((151046, 151079), 'numpy.radians', '_np.radians', (['(90.0 - lats_circular)'], {}), '(90.0 - lats_circular)\n', (151057, 151079), True, 'import numpy as _np\n'), ((152403, 152469), 'matplotlib.pyplot.Normalize', 
'_plt.Normalize', (['(-magnmax_face / 2.0)', '(magnmax_face / 2.0)'], {'clip': '(True)'}), '(-magnmax_face / 2.0, magnmax_face / 2.0, clip=True)\n', (152417, 152469), True, 'import matplotlib.pyplot as _plt\n'), ((152824, 152837), 'matplotlib.pyplot.figure', '_plt.figure', ([], {}), '()\n', (152835, 152837), True, 'import matplotlib.pyplot as _plt\n'), ((195165, 195264), 'pygmt.makecpt', '_pygmt.makecpt', ([], {'series': 'cmap_limits', 'cmap': 'cmap', 'reverse': 'cmap_reverse', 'continuous': 'cmap_continuous'}), '(series=cmap_limits, cmap=cmap, reverse=cmap_reverse,\n continuous=cmap_continuous)\n', (195179, 195264), True, 'import pygmt as _pygmt\n'), ((203409, 203425), 'numpy.copy', '_np.copy', (['offset'], {}), '(offset)\n', (203417, 203425), True, 'import numpy as _np\n'), ((8217, 8239), 'numpy.zeros', '_np.zeros', (['(2, nl, nl)'], {}), '((2, nl, nl))\n', (8226, 8239), True, 'import numpy as _np\n'), ((8275, 8312), 'numpy.zeros', '_np.zeros', (['(2, nl, nl)'], {'dtype': 'complex'}), '((2, nl, nl), dtype=complex)\n', (8284, 8312), True, 'import numpy as _np\n'), ((15777, 15803), 'numpy.random.seed', '_np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (15792, 15803), True, 'import numpy as _np\n'), ((15860, 15882), 'numpy.zeros', '_np.zeros', (['(2, nl, nl)'], {}), '((2, nl, nl))\n', (15869, 15882), True, 'import numpy as _np\n'), ((17345, 17422), 'numpy.pad', '_np.pad', (['coeffs', '((0, 0), (0, lmax - nl + 1), (0, lmax - nl + 1))', '"""constant"""'], {}), "(coeffs, ((0, 0), (0, lmax - nl + 1), (0, lmax - nl + 1)), 'constant')\n", (17352, 17422), True, 'import numpy as _np\n'), ((26276, 26294), 'numpy.deg2rad', '_np.deg2rad', (['theta'], {}), '(theta)\n', (26287, 26294), True, 'import numpy as _np\n'), ((34809, 34829), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (34819, 34829), True, 'import numpy as _np\n'), ((34868, 34888), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (34878, 34888), True, 'import numpy as 
_np\n'), ((47043, 47062), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (47055, 47062), True, 'import numpy as _np\n'), ((61292, 61311), 'numpy.radians', '_np.radians', (['angles'], {}), '(angles)\n', (61303, 61311), True, 'import numpy as _np\n'), ((66127, 66218), 'numpy.pad', '_np.pad', (['clm.coeffs', '((0, 0), (0, lmax - self.lmax), (0, lmax - self.lmax))', '"""constant"""'], {}), "(clm.coeffs, ((0, 0), (0, lmax - self.lmax), (0, lmax - self.lmax)),\n 'constant')\n", (66134, 66218), True, 'import numpy as _np\n'), ((66267, 66317), 'numpy.zeros', '_np.zeros', (['(2, lmax + 1, lmax + 1)'], {'dtype': '_np.bool'}), '((2, lmax + 1, lmax + 1), dtype=_np.bool)\n', (66276, 66317), True, 'import numpy as _np\n'), ((66339, 66359), 'numpy.arange', '_np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (66349, 66359), True, 'import numpy as _np\n'), ((75326, 75345), 'matplotlib.pyplot.subplots', '_plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (75339, 75345), True, 'import matplotlib.pyplot as _plt\n'), ((82491, 82510), 'matplotlib.pyplot.subplots', '_plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (82504, 82510), True, 'import matplotlib.pyplot as _plt\n'), ((88809, 88830), 'numpy.fliplr', '_np.fliplr', (['mnegative'], {}), '(mnegative)\n', (88819, 88830), True, 'import numpy as _np\n'), ((90903, 90918), 'matplotlib.pyplot.subplots', '_plt.subplots', ([], {}), '()\n', (90916, 90918), True, 'import matplotlib.pyplot as _plt\n'), ((91381, 91423), 'matplotlib.colors.LogNorm', '_mpl.colors.LogNorm', (['vmin', 'vmax'], {'clip': '(True)'}), '(vmin, vmax, clip=True)\n', (91400, 91423), True, 'import matplotlib as _mpl\n'), ((98400, 98421), 'numpy.fliplr', '_np.fliplr', (['mnegative'], {}), '(mnegative)\n', (98410, 98421), True, 'import numpy as _np\n'), ((100494, 100509), 'matplotlib.pyplot.subplots', '_plt.subplots', ([], {}), '()\n', (100507, 100509), True, 'import matplotlib.pyplot as _plt\n'), ((100972, 101014), 'matplotlib.colors.LogNorm', 
'_mpl.colors.LogNorm', (['vmin', 'vmax'], {'clip': '(True)'}), '(vmin, vmax, clip=True)\n', (100991, 101014), True, 'import matplotlib as _mpl\n'), ((104122, 104138), 'numpy.copy', '_np.copy', (['coeffs'], {}), '(coeffs)\n', (104130, 104138), True, 'import numpy as _np\n'), ((108893, 108909), 'numpy.rad2deg', '_np.rad2deg', (['lat'], {}), '(lat)\n', (108904, 108909), True, 'import numpy as _np\n'), ((108930, 108946), 'numpy.rad2deg', '_np.rad2deg', (['lon'], {}), '(lon)\n', (108941, 108946), True, 'import numpy as _np\n'), ((111673, 111689), 'numpy.copy', '_np.copy', (['coeffs'], {}), '(coeffs)\n', (111681, 111689), True, 'import numpy as _np\n'), ((118972, 118988), 'numpy.rad2deg', '_np.rad2deg', (['lat'], {}), '(lat)\n', (118983, 118988), True, 'import numpy as _np\n'), ((119009, 119025), 'numpy.rad2deg', '_np.rad2deg', (['lon'], {}), '(lon)\n', (119020, 119025), True, 'import numpy as _np\n'), ((127918, 127959), 'numpy.zeros', '_np.zeros', (['(nlat, nlon)'], {'dtype': '_np.float_'}), '((nlat, nlon), dtype=_np.float_)\n', (127927, 127959), True, 'import numpy as _np\n'), ((127994, 128037), 'numpy.zeros', '_np.zeros', (['(nlat, nlon)'], {'dtype': '_np.complex_'}), '((nlat, nlon), dtype=_np.complex_)\n', (128003, 128037), True, 'import numpy as _np\n'), ((130092, 130110), 'numpy.deg2rad', '_np.deg2rad', (['theta'], {}), '(theta)\n', (130103, 130110), True, 'import numpy as _np\n'), ((130130, 130147), 'numpy.deg2rad', '_np.deg2rad', (['clat'], {}), '(clat)\n', (130141, 130147), True, 'import numpy as _np\n'), ((130167, 130184), 'numpy.deg2rad', '_np.deg2rad', (['clon'], {}), '(clon)\n', (130178, 130184), True, 'import numpy as _np\n'), ((130594, 130607), 'numpy.cos', '_np.cos', (['clat'], {}), '(clat)\n', (130601, 130607), True, 'import numpy as _np\n'), ((130610, 130623), 'numpy.cos', '_np.cos', (['clon'], {}), '(clon)\n', (130617, 130623), True, 'import numpy as _np\n'), ((130636, 130649), 'numpy.cos', '_np.cos', (['clat'], {}), '(clat)\n', (130643, 130649), True, 
'import numpy as _np\n'), ((130652, 130665), 'numpy.sin', '_np.sin', (['clon'], {}), '(clon)\n', (130659, 130665), True, 'import numpy as _np\n'), ((130814, 130830), 'numpy.cos', '_np.cos', (['lats[i]'], {}), '(lats[i])\n', (130821, 130830), True, 'import numpy as _np\n'), ((130852, 130868), 'numpy.sin', '_np.sin', (['lats[i]'], {}), '(lats[i])\n', (130859, 130868), True, 'import numpy as _np\n'), ((132399, 132427), 'numpy.loadtxt', '_np.loadtxt', (['fname'], {}), '(fname, **kwargs)\n', (132410, 132427), True, 'import numpy as _np\n'), ((135274, 135316), 'numpy.savetxt', '_np.savetxt', (['filename', 'self.data'], {}), '(filename, self.data, **kwargs)\n', (135285, 135316), True, 'import numpy as _np\n'), ((146823, 146842), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (146835, 146842), True, 'import numpy as _np\n'), ((150599, 150624), 'numpy.append', '_np.append', (['lats', '[-90.0]'], {}), '(lats, [-90.0])\n', (150609, 150624), True, 'import numpy as _np\n'), ((151523, 151554), 'numpy.zeros', '_np.zeros', (['(nlat + 1, nlon + 1)'], {}), '((nlat + 1, nlon + 1))\n', (151532, 151554), True, 'import numpy as _np\n'), ((151627, 151645), 'numpy.mean', '_np.mean', (['data[-1]'], {}), '(data[-1])\n', (151635, 151645), True, 'import numpy as _np\n'), ((151762, 151793), 'numpy.zeros', '_np.zeros', (['(nlat + 2, nlon + 1)'], {}), '((nlat + 2, nlon + 1))\n', (151771, 151793), True, 'import numpy as _np\n'), ((151866, 151883), 'numpy.mean', '_np.mean', (['data[0]'], {}), '(data[0])\n', (151874, 151883), True, 'import numpy as _np\n'), ((151931, 151949), 'numpy.mean', '_np.mean', (['data[-1]'], {}), '(data[-1])\n', (151939, 151949), True, 'import numpy as _np\n'), ((152264, 152282), 'numpy.abs', '_np.abs', (['magn_face'], {}), '(magn_face)\n', (152271, 152282), True, 'import numpy as _np\n'), ((152316, 152335), 'numpy.abs', '_np.abs', (['magn_point'], {}), '(magn_point)\n', (152323, 152335), True, 'import numpy as _np\n'), ((175599, 175614), 'numpy.copy', 
'_np.copy', (['array'], {}), '(array)\n', (175607, 175614), True, 'import numpy as _np\n'), ((175793, 175833), 'numpy.linspace', '_np.linspace', (['(90.0)', '(-90.0)'], {'num': 'self.nlat'}), '(90.0, -90.0, num=self.nlat)\n', (175805, 175833), True, 'import numpy as _np\n'), ((175867, 175927), 'numpy.linspace', '_np.linspace', (['(90.0)', '(-90.0 + 180.0 / self.nlat)'], {'num': 'self.nlat'}), '(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)\n', (175879, 175927), True, 'import numpy as _np\n'), ((176083, 176122), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0)'], {'num': 'self.nlon'}), '(0.0, 360.0, num=self.nlon)\n', (176095, 176122), True, 'import numpy as _np\n'), ((176156, 176215), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0 - 360.0 / self.nlon)'], {'num': 'self.nlon'}), '(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)\n', (176168, 176215), True, 'import numpy as _np\n'), ((178384, 178412), 'matplotlib.pyplot.figure', '_plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (178395, 178412), True, 'import matplotlib.pyplot as _plt\n'), ((178959, 179027), 'numpy.linspace', '_np.linspace', (['(0)', '(360)'], {'num': '(360 // tick_interval[0] + 1)', 'endpoint': '(True)'}), '(0, 360, num=360 // tick_interval[0] + 1, endpoint=True)\n', (178971, 179027), True, 'import numpy as _np\n'), ((179154, 179223), 'numpy.linspace', '_np.linspace', (['(-90)', '(90)'], {'num': '(180 // tick_interval[1] + 1)', 'endpoint': '(True)'}), '(-90, 90, num=180 // tick_interval[1] + 1, endpoint=True)\n', (179166, 179223), True, 'import numpy as _np\n'), ((179368, 179442), 'numpy.linspace', '_np.linspace', (['(0)', '(360)'], {'num': '(360 // minor_tick_interval[0] + 1)', 'endpoint': '(True)'}), '(0, 360, num=360 // minor_tick_interval[0] + 1, endpoint=True)\n', (179380, 179442), True, 'import numpy as _np\n'), ((179570, 179645), 'numpy.linspace', '_np.linspace', (['(-90)', '(90)'], {'num': '(180 // minor_tick_interval[1] + 1)', 'endpoint': '(True)'}), '(-90, 90, 
num=180 // minor_tick_interval[1] + 1, endpoint=True)\n', (179582, 179645), True, 'import numpy as _np\n'), ((180096, 180118), 'matplotlib.cm.get_cmap', '_mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (180112, 180118), True, 'import matplotlib as _mpl\n'), ((187265, 187291), 'mpl_toolkits.axes_grid1.make_axes_locatable', '_make_axes_locatable', (['axes'], {}), '(axes)\n', (187285, 187291), True, 'from mpl_toolkits.axes_grid1 import make_axes_locatable as _make_axes_locatable\n'), ((187883, 187956), 'matplotlib.pyplot.colorbar', '_plt.colorbar', (['cim'], {'cax': 'cax', 'orientation': 'orientation', 'extend': 'cb_triangles'}), '(cim, cax=cax, orientation=orientation, extend=cb_triangles)\n', (187896, 187956), True, 'import matplotlib.pyplot as _plt\n'), ((195017, 195032), 'pygmt.Figure', '_pygmt.Figure', ([], {}), '()\n', (195030, 195032), True, 'import pygmt as _pygmt\n'), ((196679, 196694), 'numpy.copy', '_np.copy', (['array'], {}), '(array)\n', (196687, 196694), True, 'import numpy as _np\n'), ((196931, 196971), 'numpy.linspace', '_np.linspace', (['(90.0)', '(-90.0)'], {'num': 'self.nlat'}), '(90.0, -90.0, num=self.nlat)\n', (196943, 196971), True, 'import numpy as _np\n'), ((197005, 197065), 'numpy.linspace', '_np.linspace', (['(90.0)', '(-90.0 + 180.0 / self.nlat)'], {'num': 'self.nlat'}), '(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)\n', (197017, 197065), True, 'import numpy as _np\n'), ((197279, 197318), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0)'], {'num': 'self.nlon'}), '(0.0, 360.0, num=self.nlon)\n', (197291, 197318), True, 'import numpy as _np\n'), ((197352, 197411), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0 - 360.0 / self.nlon)'], {'num': 'self.nlon'}), '(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)\n', (197364, 197411), True, 'import numpy as _np\n'), ((199515, 199551), 'matplotlib.pyplot.subplots', '_plt.subplots', (['(2)', '(1)'], {'figsize': 'figsize'}), '(2, 1, figsize=figsize)\n', (199528, 199551), True, 'import 
matplotlib.pyplot as _plt\n'), ((202284, 202299), 'pygmt.Figure', '_pygmt.Figure', ([], {}), '()\n', (202297, 202299), True, 'import pygmt as _pygmt\n'), ((205907, 205922), 'numpy.copy', '_np.copy', (['array'], {}), '(array)\n', (205915, 205922), True, 'import numpy as _np\n'), ((206392, 206431), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0)'], {'num': 'self.nlon'}), '(0.0, 360.0, num=self.nlon)\n', (206404, 206431), True, 'import numpy as _np\n'), ((206465, 206524), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0 - 360.0 / self.nlon)'], {'num': 'self.nlon'}), '(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)\n', (206477, 206524), True, 'import numpy as _np\n'), ((208560, 208596), 'matplotlib.pyplot.subplots', '_plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (208573, 208596), True, 'import matplotlib.pyplot as _plt\n'), ((208730, 208772), 'numpy.arange', '_np.arange', (['(0)', 'self.nlon', 'tick_interval[0]'], {}), '(0, self.nlon, tick_interval[0])\n', (208740, 208772), True, 'import numpy as _np\n'), ((208869, 208911), 'numpy.arange', '_np.arange', (['(0)', 'self.nlat', 'tick_interval[1]'], {}), '(0, self.nlat, tick_interval[1])\n', (208879, 208911), True, 'import numpy as _np\n'), ((209026, 209074), 'numpy.arange', '_np.arange', (['(0)', 'self.nlon', 'minor_tick_interval[0]'], {}), '(0, self.nlon, minor_tick_interval[0])\n', (209036, 209074), True, 'import numpy as _np\n'), ((209189, 209237), 'numpy.arange', '_np.arange', (['(0)', 'self.nlat', 'minor_tick_interval[1]'], {}), '(0, self.nlat, minor_tick_interval[1])\n', (209199, 209237), True, 'import numpy as _np\n'), ((209675, 209697), 'matplotlib.cm.get_cmap', '_mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (209691, 209697), True, 'import matplotlib as _mpl\n'), ((214937, 214963), 'mpl_toolkits.axes_grid1.make_axes_locatable', '_make_axes_locatable', (['axes'], {}), '(axes)\n', (214957, 214963), True, 'from mpl_toolkits.axes_grid1 import make_axes_locatable as 
_make_axes_locatable\n'), ((215555, 215628), 'matplotlib.pyplot.colorbar', '_plt.colorbar', (['cim'], {'cax': 'cax', 'orientation': 'orientation', 'extend': 'cb_triangles'}), '(cim, cax=cax, orientation=orientation, extend=cb_triangles)\n', (215568, 215628), True, 'import matplotlib.pyplot as _plt\n'), ((218561, 218576), 'numpy.copy', '_np.copy', (['array'], {}), '(array)\n', (218569, 218576), True, 'import numpy as _np\n'), ((218940, 218979), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0)'], {'num': 'self.nlon'}), '(0.0, 360.0, num=self.nlon)\n', (218952, 218979), True, 'import numpy as _np\n'), ((219013, 219072), 'numpy.linspace', '_np.linspace', (['(0.0)', '(360.0 - 360.0 / self.nlon)'], {'num': 'self.nlon'}), '(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)\n', (219025, 219072), True, 'import numpy as _np\n'), ((221141, 221177), 'matplotlib.pyplot.subplots', '_plt.subplots', (['(2)', '(1)'], {'figsize': 'figsize'}), '(2, 1, figsize=figsize)\n', (221154, 221177), True, 'import matplotlib.pyplot as _plt\n'), ((15951, 15985), 'numpy.random.normal', '_np.random.normal', ([], {'size': '(2, l + 1)'}), '(size=(2, l + 1))\n', (15968, 15985), True, 'import numpy as _np\n'), ((16125, 16162), 'numpy.zeros', '_np.zeros', (['(2, nl, nl)'], {'dtype': 'complex'}), '((2, nl, nl), dtype=complex)\n', (16134, 16162), True, 'import numpy as _np\n'), ((16526, 16561), 'numpy.sqrt', '_np.sqrt', (['(power[0:nl] / power_per_l)'], {}), '(power[0:nl] / power_per_l)\n', (16534, 16561), True, 'import numpy as _np\n'), ((16644, 16685), 'numpy.sqrt', '_np.sqrt', (['(power[0:nl] / (2 * degrees + 1))'], {}), '(power[0:nl] / (2 * degrees + 1))\n', (16652, 16685), True, 'import numpy as _np\n'), ((22026, 22051), 'numpy.load', '_np.load', (['fname'], {}), '(fname, **kwargs)\n', (22034, 22051), True, 'import numpy as _np\n'), ((31932, 31943), 'numpy.abs', '_np.abs', (['ms'], {}), '(ms)\n', (31939, 31943), True, 'import numpy as _np\n'), ((33890, 33931), 'numpy.save', '_np.save', 
(['filename', 'self.coeffs'], {}), '(filename, self.coeffs, **kwargs)\n', (33898, 33931), True, 'import numpy as _np\n'), ((38384, 38453), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (38393, 38453), True, 'import numpy as _np\n'), ((38985, 39004), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (38997, 39004), True, 'import numpy as _np\n'), ((40327, 40396), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (40336, 40396), True, 'import numpy as _np\n'), ((40928, 40947), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (40940, 40947), True, 'import numpy as _np\n'), ((42060, 42129), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (42069, 42129), True, 'import numpy as _np\n'), ((42661, 42680), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (42673, 42680), True, 'import numpy as _np\n'), ((43673, 43742), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (43682, 43742), True, 'import numpy as _np\n'), ((44274, 44293), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (44286, 44293), True, 'import numpy as _np\n'), ((44324, 44393), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (44333, 44393), True, 'import numpy as _np\n'), ((45581, 45650), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (45590, 45650), 
True, 'import numpy as _np\n'), ((46182, 46201), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (46194, 46201), True, 'import numpy as _np\n'), ((46232, 46301), 'numpy.empty', '_np.empty', (['[2, self.lmax + 1, self.lmax + 1]'], {'dtype': 'self.coeffs.dtype'}), '([2, self.lmax + 1, self.lmax + 1], dtype=self.coeffs.dtype)\n', (46241, 46301), True, 'import numpy as _np\n'), ((60864, 60898), 'numpy.array', '_np.array', (['[-gamma, -beta, -alpha]'], {}), '([-gamma, -beta, -alpha])\n', (60873, 60898), True, 'import numpy as _np\n'), ((60942, 60973), 'numpy.array', '_np.array', (['[alpha, beta, gamma]'], {}), '([alpha, beta, gamma])\n', (60951, 60973), True, 'import numpy as _np\n'), ((90659, 90679), 'numpy.arange', '_np.arange', (['(lmax + 2)'], {}), '(lmax + 2)\n', (90669, 90679), True, 'import numpy as _np\n'), ((91006, 91026), 'numpy.nanmax', '_np.nanmax', (['spectrum'], {}), '(spectrum)\n', (91016, 91026), True, 'import numpy as _np\n'), ((91058, 91078), 'numpy.nanmax', '_np.nanmax', (['spectrum'], {}), '(spectrum)\n', (91068, 91078), True, 'import numpy as _np\n'), ((91234, 91251), 'numpy.nanmin', '_np.nanmin', (['_temp'], {}), '(_temp)\n', (91244, 91251), True, 'import numpy as _np\n'), ((91304, 91324), 'numpy.nanmax', '_np.nanmax', (['spectrum'], {}), '(spectrum)\n', (91314, 91324), True, 'import numpy as _np\n'), ((91548, 91574), 'matplotlib.pyplot.Normalize', '_plt.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (91562, 91574), True, 'import matplotlib.pyplot as _plt\n'), ((100250, 100270), 'numpy.arange', '_np.arange', (['(lmax + 2)'], {}), '(lmax + 2)\n', (100260, 100270), True, 'import numpy as _np\n'), ((100597, 100617), 'numpy.nanmax', '_np.nanmax', (['spectrum'], {}), '(spectrum)\n', (100607, 100617), True, 'import numpy as _np\n'), ((100649, 100669), 'numpy.nanmax', '_np.nanmax', (['spectrum'], {}), '(spectrum)\n', (100659, 100669), True, 'import numpy as _np\n'), ((100825, 100842), 'numpy.nanmin', '_np.nanmin', (['_temp'], {}), 
'(_temp)\n', (100835, 100842), True, 'import numpy as _np\n'), ((100895, 100915), 'numpy.nanmax', '_np.nanmax', (['spectrum'], {}), '(spectrum)\n', (100905, 100915), True, 'import numpy as _np\n'), ((101139, 101165), 'matplotlib.pyplot.Normalize', '_plt.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (101153, 101165), True, 'import matplotlib.pyplot as _plt\n'), ((109542, 109574), 'numpy.empty_like', '_np.empty_like', (['lat'], {'dtype': 'float'}), '(lat, dtype=float)\n', (109556, 109574), True, 'import numpy as _np\n'), ((109617, 109675), 'numpy.nditer', '_np.nditer', (['[values, latin, lonin]'], {'op_flags': "['readwrite']"}), "([values, latin, lonin], op_flags=['readwrite'])\n", (109627, 109675), True, 'import numpy as _np\n'), ((112492, 112512), 'numpy.arange', '_np.arange', (['(1)', '(l + 1)'], {}), '(1, l + 1)\n', (112502, 112512), True, 'import numpy as _np\n'), ((119624, 119662), 'numpy.empty_like', '_np.empty_like', (['lat'], {'dtype': '_np.complex'}), '(lat, dtype=_np.complex)\n', (119638, 119662), True, 'import numpy as _np\n'), ((119705, 119763), 'numpy.nditer', '_np.nditer', (['[values, latin, lonin]'], {'op_flags': "['readwrite']"}), "([values, latin, lonin], op_flags=['readwrite'])\n", (119715, 119763), True, 'import numpy as _np\n'), ((132476, 132501), 'numpy.load', '_np.load', (['fname'], {}), '(fname, **kwargs)\n', (132484, 132501), True, 'import numpy as _np\n'), ((135358, 135397), 'numpy.save', '_np.save', (['filename', 'self.data'], {}), '(filename, self.data, **kwargs)\n', (135366, 135397), True, 'import numpy as _np\n'), ((141875, 141894), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (141887, 141894), True, 'import numpy as _np\n'), ((143032, 143051), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (143044, 143051), True, 'import numpy as _np\n'), ((144056, 144075), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (144068, 144075), True, 'import numpy as _np\n'), ((145079, 145098), 
'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (145091, 145098), True, 'import numpy as _np\n'), ((146252, 146271), 'numpy.isscalar', '_np.isscalar', (['other'], {}), '(other)\n', (146264, 146271), True, 'import numpy as _np\n'), ((150296, 150314), 'numpy.abs', '_np.abs', (['self.data'], {}), '(self.data)\n', (150303, 150314), True, 'import numpy as _np\n'), ((150724, 150759), 'numpy.hstack', '_np.hstack', (['([90.0], lats, [-90.0])'], {}), '(([90.0], lats, [-90.0]))\n', (150734, 150759), True, 'import numpy as _np\n'), ((151092, 151102), 'numpy.sin', '_np.sin', (['v'], {}), '(v)\n', (151099, 151102), True, 'import numpy as _np\n'), ((151114, 151124), 'numpy.cos', '_np.cos', (['u'], {}), '(u)\n', (151121, 151124), True, 'import numpy as _np\n'), ((151146, 151156), 'numpy.sin', '_np.sin', (['v'], {}), '(v)\n', (151153, 151156), True, 'import numpy as _np\n'), ((151168, 151178), 'numpy.sin', '_np.sin', (['u'], {}), '(u)\n', (151175, 151178), True, 'import numpy as _np\n'), ((151200, 151210), 'numpy.cos', '_np.cos', (['v'], {}), '(v)\n', (151207, 151210), True, 'import numpy as _np\n'), ((151222, 151250), 'numpy.ones_like', '_np.ones_like', (['lons_circular'], {}), '(lons_circular)\n', (151235, 151250), True, 'import numpy as _np\n'), ((180028, 180055), 'matplotlib.cm.get_cmap', '_mpl.cm.get_cmap', (['cmap', 'num'], {}), '(cmap, num)\n', (180044, 180055), True, 'import matplotlib as _mpl\n'), ((180397, 180411), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (180405, 180411), True, 'import numpy as _np\n'), ((180663, 180677), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (180671, 180677), True, 'import numpy as _np\n'), ((181131, 181145), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (181139, 181145), True, 'import numpy as _np\n'), ((181421, 181435), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (181429, 181435), True, 'import numpy as _np\n'), ((184962, 185005), 'matplotlib.ticker.FormatStrFormatter', 
'_mpl.ticker.FormatStrFormatter', (["('%d' + u'°')"], {}), "('%d' + u'°')\n", (184992, 185005), True, 'import matplotlib as _mpl\n'), ((185070, 185113), 'matplotlib.ticker.FormatStrFormatter', '_mpl.ticker.FormatStrFormatter', (["('%d' + u'°')"], {}), "('%d' + u'°')\n", (185100, 185113), True, 'import matplotlib as _mpl\n'), ((209607, 209634), 'matplotlib.cm.get_cmap', '_mpl.cm.get_cmap', (['cmap', 'num'], {}), '(cmap, num)\n', (209623, 209634), True, 'import matplotlib as _mpl\n'), ((209976, 209990), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (209984, 209990), True, 'import numpy as _np\n'), ((210242, 210256), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (210250, 210256), True, 'import numpy as _np\n'), ((210710, 210724), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (210718, 210724), True, 'import numpy as _np\n'), ((211000, 211014), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (211008, 211014), True, 'import numpy as _np\n'), ((39053, 39076), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (39069, 39076), True, 'import numpy as _np\n'), ((40996, 41019), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (41012, 41019), True, 'import numpy as _np\n'), ((42729, 42752), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (42745, 42752), True, 'import numpy as _np\n'), ((44460, 44483), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (44476, 44483), True, 'import numpy as _np\n'), ((46368, 46391), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (46384, 46391), True, 'import numpy as _np\n'), ((61060, 61120), 'numpy.array', '_np.array', (['[-gamma - _np.pi / 2, -beta, -alpha + _np.pi / 2]'], {}), '([-gamma - _np.pi / 2, -beta, -alpha + _np.pi / 2])\n', (61069, 61120), True, 'import numpy as _np\n'), ((61196, 61253), 'numpy.array', '_np.array', (['[alpha - _np.pi / 2, beta, gamma + _np.pi / 2]'], {}), '([alpha - _np.pi / 
2, beta, gamma + _np.pi / 2])\n', (61205, 61253), True, 'import numpy as _np\n'), ((131007, 131023), 'numpy.arccos', '_np.arccos', (['dist'], {}), '(dist)\n', (131017, 131023), True, 'import numpy as _np\n'), ((141943, 141966), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (141959, 141966), True, 'import numpy as _np\n'), ((143100, 143123), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (143116, 143123), True, 'import numpy as _np\n'), ((144124, 144147), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (144140, 144147), True, 'import numpy as _np\n'), ((145147, 145170), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (145163, 145170), True, 'import numpy as _np\n'), ((146320, 146343), 'numpy.iscomplexobj', '_np.iscomplexobj', (['other'], {}), '(other)\n', (146336, 146343), True, 'import numpy as _np\n'), ((180519, 180533), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (180527, 180533), True, 'import numpy as _np\n'), ((180788, 180802), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (180796, 180802), True, 'import numpy as _np\n'), ((181265, 181279), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (181273, 181279), True, 'import numpy as _np\n'), ((181558, 181572), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (181566, 181572), True, 'import numpy as _np\n'), ((183260, 183300), 'cartopy.crs.PlateCarree', '_ccrs.PlateCarree', ([], {'central_longitude': '(0.0)'}), '(central_longitude=0.0)\n', (183277, 183300), True, 'import cartopy.crs as _ccrs\n'), ((184016, 184037), 'cartopy.mpl.ticker.LongitudeFormatter', '_LongitudeFormatter', ([], {}), '()\n', (184035, 184037), True, 'from cartopy.mpl.ticker import LongitudeFormatter as _LongitudeFormatter\n'), ((184086, 184106), 'cartopy.mpl.ticker.LatitudeFormatter', '_LatitudeFormatter', ([], {}), '()\n', (184104, 184106), True, 'from cartopy.mpl.ticker import LatitudeFormatter as 
_LatitudeFormatter\n'), ((206137, 206159), 'numpy.arccos', '_np.arccos', (['self.zeros'], {}), '(self.zeros)\n', (206147, 206159), True, 'import numpy as _np\n'), ((210098, 210112), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (210106, 210112), True, 'import numpy as _np\n'), ((210367, 210381), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (210375, 210381), True, 'import numpy as _np\n'), ((210844, 210858), 'numpy.sign', '_np.sign', (['vmin'], {}), '(vmin)\n', (210852, 210858), True, 'import numpy as _np\n'), ((211137, 211151), 'numpy.sign', '_np.sign', (['vmax'], {}), '(vmax)\n', (211145, 211151), True, 'import numpy as _np\n'), ((218738, 218760), 'numpy.arccos', '_np.arccos', (['self.zeros'], {}), '(self.zeros)\n', (218748, 218760), True, 'import numpy as _np\n'), ((16387, 16400), 'numpy.sqrt', '_np.sqrt', (['(2.0)'], {}), '(2.0)\n', (16395, 16400), True, 'import numpy as _np\n'), ((183548, 183588), 'cartopy.crs.PlateCarree', '_ccrs.PlateCarree', ([], {'central_longitude': '(0.0)'}), '(central_longitude=0.0)\n', (183565, 183588), True, 'import cartopy.crs as _ccrs\n'), ((183655, 183695), 'cartopy.crs.PlateCarree', '_ccrs.PlateCarree', ([], {'central_longitude': '(0.0)'}), '(central_longitude=0.0)\n', (183672, 183695), True, 'import cartopy.crs as _ccrs\n'), ((183791, 183831), 'cartopy.crs.PlateCarree', '_ccrs.PlateCarree', ([], {'central_longitude': '(0.0)'}), '(central_longitude=0.0)\n', (183808, 183831), True, 'import cartopy.crs as _ccrs\n'), ((183927, 183967), 'cartopy.crs.PlateCarree', '_ccrs.PlateCarree', ([], {'central_longitude': '(0.0)'}), '(central_longitude=0.0)\n', (183944, 183967), True, 'import cartopy.crs as _ccrs\n'), ((184278, 184318), 'cartopy.crs.PlateCarree', '_ccrs.PlateCarree', ([], {'central_longitude': '(0.0)'}), '(central_longitude=0.0)\n', (184295, 184318), True, 'import cartopy.crs as _ccrs\n'), ((16232, 16266), 'numpy.random.normal', '_np.random.normal', ([], {'size': '(2, l + 1)'}), '(size=(2, l + 1))\n', (16249, 
16266), True, 'import numpy as _np\n'), ((16311, 16345), 'numpy.random.normal', '_np.random.normal', ([], {'size': '(2, l + 1)'}), '(size=(2, l + 1))\n', (16328, 16345), True, 'import numpy as _np\n'), ((89658, 89675), 'numpy.arange', '_np.arange', (['(l + 1)'], {}), '(l + 1)\n', (89668, 89675), True, 'import numpy as _np\n'), ((99249, 99266), 'numpy.arange', '_np.arange', (['(l + 1)'], {}), '(l + 1)\n', (99259, 99266), True, 'import numpy as _np\n'), ((89736, 89754), 'scipy.special.factorial', '_factorial', (['(l - ms)'], {}), '(l - ms)\n', (89746, 89754), True, 'from scipy.special import factorial as _factorial\n'), ((99327, 99345), 'scipy.special.factorial', '_factorial', (['(l - ms)'], {}), '(l - ms)\n', (99337, 99345), True, 'from scipy.special import factorial as _factorial\n'), ((89701, 89719), 'scipy.special.factorial', '_factorial', (['(l + ms)'], {}), '(l + ms)\n', (89711, 89719), True, 'from scipy.special import factorial as _factorial\n'), ((99292, 99310), 'scipy.special.factorial', '_factorial', (['(l + ms)'], {}), '(l + ms)\n', (99302, 99310), True, 'from scipy.special import factorial as _factorial\n')] |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scoring functions relating to the buoy."""
from makani.lib.python.batch_sim import scoring_functions
from makani.lib.python.h5_utils import numpy_utils
import numpy as np
class BuoyWaterLineScoringFunction(
    scoring_functions.SingleSidedLimitScoringFunction):
  """Scores the smallest clearance between the water line and its threshold."""

  def __init__(self, good_limit, bad_limit, severity):
    super(BuoyWaterLineScoringFunction, self).__init__(
        'Buoy Min. Water Line Distance to Threshold', 'm', good_limit,
        bad_limit, severity)
    # A larger clearance is better, so the good limit must exceed the bad one.
    assert good_limit > bad_limit

  def GetSystemLabels(self):
    return ['offshore', 'buoy']

  def GetValue(self, output):
    return output['water_line_min']

  def GetOutput(self, timeseries):
    # Worst-case (smallest) clearance over the whole run.
    return {'water_line_min': np.min(timeseries['water_line'])}

  def GetTimeSeries(self, params, sim, control):
    return {'water_line': self._SelectTelemetry(sim, control, 'water_line')}
class BuoyYawAngleScoringFunction(
    scoring_functions.SingleSidedLimitScoringFunction):
  """Scores the largest excursion of buoy yaw away from equilibrium."""

  def __init__(self, good_limit, bad_limit, severity):
    super(BuoyYawAngleScoringFunction, self).__init__(
        'Buoy Peak Yaw Angle From Equilibrium', 'deg', good_limit,
        bad_limit, severity)
    # Smaller excursions are better, so the good limit is below the bad one.
    assert good_limit < bad_limit

  def GetSystemLabels(self):
    return ['offshore', 'buoy']

  def GetValue(self, output):
    return output['peak_buoy_yaw_angle_deg']

  def GetOutput(self, timeseries):
    # Peak magnitude of the signed yaw excursion, in degrees.
    yaw_deg = timeseries['buoy_yaw_angle_from_eq_deg']
    return {'peak_buoy_yaw_angle_deg': np.fabs(yaw_deg).max()}

  def GetTimeSeries(self, params, sim, control):
    yaw_rad = self._SelectTelemetry(sim, control, 'buoy_yaw_angle_from_eq')
    return {'buoy_yaw_angle_from_eq_deg': np.rad2deg(yaw_rad)}
class BuoyVesselOriginAccelScoringFunction(
    scoring_functions.SingleSidedLimitScoringFunction):
  """Scores the peak acceleration magnitude of the vessel-frame origin."""

  def __init__(self, good_limit, bad_limit, severity):
    super(BuoyVesselOriginAccelScoringFunction, self).__init__(
        'Buoy Vessel Origin Acceleration', 'g', good_limit,
        bad_limit, severity)
    # Smaller accelerations are better.
    assert good_limit < bad_limit

  def GetSystemLabels(self):
    return ['offshore', 'buoy']

  def GetValue(self, output):
    return output['peak_buoy_accel_norm_gs']

  def GetOutput(self, timeseries):
    return {'peak_buoy_accel_norm_gs': np.max(timeseries['buoy_accel_norm_gs'])}

  def GetTimeSeries(self, params, sim, control):
    buoy_accel_g = self._SelectTelemetry(sim, control, 'buoy_accel_g')
    try:
      # Euclidean norm of the acceleration vector along the last axis.
      components = np.abs(numpy_utils.Vec3ToArray(buoy_accel_g))
      buoy_accel_g_norm = np.sum(components**2, axis=-1)**(1./2)
    except (TypeError, ValueError):
      # NOTE(review): this appears to cover the case where the telemetry is
      # missing/malformed; a NaN is reported instead — confirm upstream.
      buoy_accel_g_norm = np.array([float('nan')])
    # Convert from m/s^2 to g.
    return {'buoy_accel_norm_gs': buoy_accel_g_norm / 9.81}
| [
"makani.lib.python.h5_utils.numpy_utils.Vec3ToArray",
"numpy.degrees",
"numpy.min",
"numpy.fabs",
"numpy.max"
] | [((1371, 1403), 'numpy.min', 'np.min', (["timeseries['water_line']"], {}), "(timeseries['water_line'])\n", (1377, 1403), True, 'import numpy as np\n'), ((2526, 2560), 'numpy.degrees', 'np.degrees', (['buoy_yaw_angle_from_eq'], {}), '(buoy_yaw_angle_from_eq)\n', (2536, 2560), True, 'import numpy as np\n'), ((3257, 3283), 'numpy.max', 'np.max', (['buoy_accel_norm_gs'], {}), '(buoy_accel_norm_gs)\n', (3263, 3283), True, 'import numpy as np\n'), ((2254, 2289), 'numpy.fabs', 'np.fabs', (['buoy_yaw_angle_from_eq_deg'], {}), '(buoy_yaw_angle_from_eq_deg)\n', (2261, 2289), True, 'import numpy as np\n'), ((3467, 3504), 'makani.lib.python.h5_utils.numpy_utils.Vec3ToArray', 'numpy_utils.Vec3ToArray', (['buoy_accel_g'], {}), '(buoy_accel_g)\n', (3490, 3504), False, 'from makani.lib.python.h5_utils import numpy_utils\n')] |
import os.path
import numpy as np
import pandas as pd
import pytest
from packerlabimaging.utils.io import import_obj
# Root data directories on the local workstation and on the remote server.
LOCAL_DATA_PATH = '/Users/prajayshah/data/oxford-data-to-process/'
REMOTE_DATA_PATH = '/home/pshah/mnt/qnap/Data/'
# Base directory used when building paths below; switch to REMOTE_DATA_PATH
# when running on the server.
BASE_PATH = LOCAL_DATA_PATH
# Frame offsets into the stitched suite2p run — presumably the starting frame
# index of each trial; TODO confirm against the suite2p output.
SUITE2P_FRAMES_SPONT_t005t006 = [0, 14880]
SUITE2P_FRAMES_t013 = 103525
@pytest.fixture(scope="session")
def twophoton_imaging_trial_new_noPreDoneSuite2p_fixture():
    """Trial -> .paq file mapping for the apr 30 2022 (v0.2.0) structure.

    Prep HF113, recorded 2021-01-31.

    Returns:
        dict: trial ID mapped to its .paq file name.
    """
    # The original assigned unused locals ``date`` and ``prep``; that
    # information now lives in the docstring instead.
    trials_paq = {'t-001': '001.paq',
                  't-002': '002.paq',
                  't-003': '003.paq',
                  't-004': '004.paq'}
    return trials_paq
@pytest.fixture(scope="session")
def twophoton_imaging_multitrial_noPreDoneSuite2p_fixture():
    """Experiment info (prep, date, trial -> .paq mapping) for HF113, 2021-01-31."""
    return {
        'prep': 'HF113',
        'date': '2021-01-31',
        'trials_paq': {
            't-001': '001.paq',
            't-002': '002.paq',
            't-003': '003.paq',
            't-004': '004.paq',
        },
    }
@pytest.fixture(scope="session")
def twophoton_imaging_trial_fixture():
    """Build the initialization dict for the RL109 spontaneous-imaging trials.

    Loads the saved RL109 experiment pickle, fills ``TrialsInformation`` for
    trials t-005/t-006, and returns the completed initialization dict.

    NOTE(review): the loop below overwrites ``metainfo``, ``paq_options`` and
    ``total_frames_stitched`` on each iteration, so the returned dict carries
    the values of the LAST trial (t-006) — confirm this is intended.
    """
    expobj = import_obj('/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')
    initialization_dict = {
        'dataPath': '/home/pshah/mnt/qnap/Data/2020-12-19',
        'saveDir': '/home/pshah/Documents/code/packerlabimaging/tests/',
        'microscope': "Bruker",
        "expID": 'RL109',
        'date': '2020-12-19',
        'comment': 'testing out analysis workflow',
        'TrialsInformation': {},  # NOTE: this dictionary is populated in the code cells below.
        # 'useSuite2p': True,
        # 'useSuite2p': False,
        's2pResultsPath': "/home/pshah/mnt/qnap/Analysis/2020-12-19/suite2p/alloptical-2p-1x-alltrials/plane0"
    }
    # add information about each trial in experiment to TrialsInformation field of the initialization_dict
    trials_list_spont = ['t-005', 't-006']
    for idx, trial in enumerate(trials_list_spont):
        data_path_base = '/home/pshah/mnt/qnap/Data/2020-12-19'
        animal_prep = initialization_dict['expID']
        # Date string recovered from the trailing 10 characters of the path.
        date = data_path_base[-10:]
        # create dictionary containing necessary information for initialization
        initialization_dict["TrialsInformation"][trial] = {'trialType': 'TwoPhotonImagingTrial',
                                                           'tiff_path': f'{data_path_base}/{date}_{trial}/{date}_{trial}_Cycle00001_Ch3.tif',
                                                           's2p_use': True,
                                                           'expGroup': "pre 4ap 2p spont imaging",
                                                           'PaqInfo': {
                                                               'frame_channel': "frame_clock",
                                                               'paq_path': f'{data_path_base}/{date}_{animal_prep}_{trial[2:]}.paq'
                                                               # path to the .paq files for the selected trials
                                                           }
                                                           }
        _metainfo = {
            'expID': initialization_dict['expID'],
            'trialID': trial,
            'date': initialization_dict['date'],
            'TrialsInformation': initialization_dict["TrialsInformation"][trial]
        }
        # These keys are re-assigned every iteration (see NOTE above).
        initialization_dict['metainfo'] = _metainfo
        initialization_dict['analysis_save_path'] = initialization_dict['saveDir']
        initialization_dict['suite2p_experiment_obj'] = expobj.Suite2p
        initialization_dict['paq_options'] = _metainfo['TrialsInformation']['PaqInfo']
        # Frame offset of this trial within the stitched suite2p run.
        initialization_dict['total_frames_stitched'] = SUITE2P_FRAMES_SPONT_t005t006[idx]
    return initialization_dict
# @pytest.fixture(scope="session")
def alloptical_trial_fixture():
    """Initialization dict for the RL109 t-013 all-optical trial (with LFP)."""
    # NOTE(review): ``expobj`` is never used below; the load is kept because it
    # side-effects (reads the pickle) and may intentionally fail early.
    expobj = import_obj('/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')
    return {'naparm_path': f'{BASE_PATH}/2020-12-19/photostim/2020-12-19_RL109_ps_014/',
            'dataPath': f'{BASE_PATH}/2020-12-19/2020-12-19_t-013/2020-12-19_t-013_Cycle00001_Ch3.tif',
            'saveDir': f'{BASE_PATH}/2020-12-19/',
            'date': '2020-12-19',
            'trialID': 't-013',
            'expID': 'RL109',
            'expGroup': 'all optical trial with LFP',
            'comment': ''}
@pytest.fixture(scope="session")
def experiment_fixture():
    """Experiment-level metainfo for the HF113 2p-imaging + LFP dataset."""
    return {
        'dataPath': '/mnt/qnap_share/Data/packerlabimaging-example/packerlabimaging-test-cellsdata',
        'saveDir': '/mnt/qnap_share/Data/packerlabimaging-example/packerlabimaging-test-analysis',
        "expID": 'HF113',
        'comment': 'two photon imaging + LFP dataset',
    }
@pytest.fixture(scope="session")
def existing_trialobj_twophotonimaging_fixture():
    """Load the saved HF113 experiment and return its first trial object."""
    expobj = import_obj(
        pkl_path='/mnt/qnap_share/Data/packerlabimaging-example/packerlabimaging-test-analysis/HF113_analysis.pkl')
    return expobj.load_trial(expobj.trialIDs[0])
@pytest.fixture(scope="session")
def existing_expobj_fixture():
    """Return the saved HF113 experiment object together with its date string."""
    expobj = import_obj(
        pkl_path='/mnt/qnap_share/Data/packerlabimaging-example/test-analysis/HF113_analysis.pkl')
    return expobj, '2021-01-31'
@pytest.fixture(scope="session")
def suite2p_results_fixture():
    """Load raw suite2p outputs (F-Fneu, spikes, stat, neuropil) for HF113."""
    # NOTE(review): ``expobj`` is unused below; the load is kept for its
    # side effect of verifying the analysis pickle exists.
    expobj = import_obj(pkl_path='/home/pshah/mnt/qnap/Analysis/2021-01-31/HF113/HF113_analysis.pkl')
    s2p_path = '/home/pshah/mnt/qnap/Analysis/2021-01-31/HF113//suite2p//plane0/'
    assert os.path.exists(s2p_path), 's2p path not found...'
    from packerlabimaging.processing.suite2p import s2p_loader
    FminusFneu, spks, stat, neuropil = s2p_loader(s2p_path=s2p_path)
    return FminusFneu, spks, stat, neuropil
@pytest.fixture(scope="session")
def existing_trialobj_alloptical_fixture():
    """Load the saved RL109 experiment and return its t-013 all-optical trial."""
    expobj = import_obj(pkl_path='/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')
    return expobj.load_trial(trialID='t-013')
# @pytest.fixture(scope="session")
def existing_expobj_nopredones2p_fixture():
    """Load and return the saved PS12 experiment object (no pre-run suite2p)."""
    return import_obj(pkl_path='/home/pshah/mnt/qnap/Analysis/2021-01-25/PS12/PS12_analysis.pkl')
def anndata_trial_data():
    """Build a random AnnData-style triplet (X, var, obs) for testing.

    Returns:
        tuple: ``(X, var, obs)`` where
            X (np.ndarray): (1000, 234) random data matrix,
            var (pd.DataFrame): per-variable annotations with columns
                'var_group_1' and 'var_group_2', indexed by variable name,
            obs (pd.DataFrame): per-observation annotations with columns
                'group' and 'group2'.
    """
    # The original re-imported pandas/numpy here, shadowing the module-level
    # imports; those redundant imports have been removed.
    # number of observations
    n_obs = 1000
    # observation annotations: random day labels
    obs = pd.DataFrame()
    obs['group'] = np.random.choice(['day 1', 'day 2', 'day 4', 'day 8'], n_obs)
    obs['group2'] = np.random.choice(['day 3', 'day 5', 'day 7'], n_obs)
    # variable names: ['A', 'B', ..., 'Z', 'AA', 'BB', ..., 'ZZ', ..., 'AAA', ...]
    from string import ascii_uppercase
    var_names = [i * letter for i in range(1, 10) for letter in ascii_uppercase]
    # number of variables
    n_vars = len(var_names)
    var_group = {'var_group_1': np.random.choice(['group A', 'group B', 'group C', 'group D'], n_vars),
                 'var_group_2': np.random.choice(['group A', 'group B', 'group C', 'group D'], n_vars)}
    # dataframe annotating the variables
    var = pd.DataFrame(var_group, index=var_names)
    # the data matrix of shape n_obs x n_vars
    X = np.random.random(n_obs * n_vars).reshape(n_obs, n_vars)
    return X, var, obs
@pytest.fixture(scope='session')
def existing_anndata():
    """Demonstrate adding a per-frame variable annotation to a trial's anndata.

    NOTE(review): this fixture returns nothing; it only mutates and prints the
    loaded trial's ``cellsdata``.
    """
    expobj = import_obj(pkl_path='/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')
    trialobj = expobj.load_trial(trialID=expobj.trialIDs[0])
    print(trialobj.cellsdata)  # the anndata object for this trial
    n = trialobj.n_frames
    labels = np.random.choice(['A', 'B', 'C'], n)
    # Frame indices (as strings) serve as the observation index.
    frame_index = np.arange(n, dtype=int).astype(str)
    var_meta = pd.DataFrame({'exp_group': labels}, index=frame_index)
    trialobj.cellsdata.add_var(var_name='exp_group', values=list(var_meta['exp_group']))
    print(trialobj.cellsdata)
@pytest.fixture(scope='session')
def s_tiff_path_fixture():
    """Path to a single-page OME-TIFF used by tiff-reading tests."""
    path = '/home/pshah/mnt/qnap/Data/2021-01-31/2021-01-31_s-007/2021-01-31_s-007_Cycle00001_Ch2_000001.ome.tif'
    return path
@pytest.fixture(scope='session')
def tiff_path_fixture():
    """Path to the multi-frame trial TIFF used by tiff-reading tests."""
    path = '/home/pshah/mnt/qnap/Data/2021-01-31/2021-01-31_t-006/2021-01-31_t-006_Cycle00001_Ch3.tif'
    return path
| [
"pandas.DataFrame",
"packerlabimaging.processing.suite2p.s2p_loader",
"pytest.fixture",
"packerlabimaging.utils.io.import_obj",
"numpy.random.random",
"numpy.arange",
"numpy.random.choice"
] | [((340, 371), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (354, 371), False, 'import pytest\n'), ((810, 841), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (824, 841), False, 'import pytest\n'), ((1235, 1266), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1249, 1266), False, 'import pytest\n'), ((4760, 4791), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4774, 4791), False, 'import pytest\n'), ((5165, 5196), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5179, 5196), False, 'import pytest\n'), ((5466, 5497), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5480, 5497), False, 'import pytest\n'), ((5705, 5736), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5719, 5736), False, 'import pytest\n'), ((6193, 6224), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (6207, 6224), False, 'import pytest\n'), ((7866, 7897), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (7880, 7897), False, 'import pytest\n'), ((8526, 8557), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (8540, 8557), False, 'import pytest\n'), ((8702, 8733), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (8716, 8733), False, 'import pytest\n'), ((1319, 1406), 'packerlabimaging.utils.io.import_obj', 'import_obj', (['"""/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl"""'], {}), "(\n '/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')\n", (1329, 1406), False, 'from packerlabimaging.utils.io import import_obj\n'), ((4100, 4187), 
'packerlabimaging.utils.io.import_obj', 'import_obj', (['"""/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl"""'], {}), "(\n '/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')\n", (4110, 4187), False, 'from packerlabimaging.utils.io import import_obj\n'), ((5260, 5388), 'packerlabimaging.utils.io.import_obj', 'import_obj', ([], {'pkl_path': '"""/mnt/qnap_share/Data/packerlabimaging-example/packerlabimaging-test-analysis/HF113_analysis.pkl"""'}), "(pkl_path=\n '/mnt/qnap_share/Data/packerlabimaging-example/packerlabimaging-test-analysis/HF113_analysis.pkl'\n )\n", (5270, 5388), False, 'from packerlabimaging.utils.io import import_obj\n'), ((5542, 5653), 'packerlabimaging.utils.io.import_obj', 'import_obj', ([], {'pkl_path': '"""/mnt/qnap_share/Data/packerlabimaging-example/test-analysis/HF113_analysis.pkl"""'}), "(pkl_path=\n '/mnt/qnap_share/Data/packerlabimaging-example/test-analysis/HF113_analysis.pkl'\n )\n", (5552, 5653), False, 'from packerlabimaging.utils.io import import_obj\n'), ((5781, 5874), 'packerlabimaging.utils.io.import_obj', 'import_obj', ([], {'pkl_path': '"""/home/pshah/mnt/qnap/Analysis/2021-01-31/HF113/HF113_analysis.pkl"""'}), "(pkl_path=\n '/home/pshah/mnt/qnap/Analysis/2021-01-31/HF113/HF113_analysis.pkl')\n", (5791, 5874), False, 'from packerlabimaging.utils.io import import_obj\n'), ((6116, 6145), 'packerlabimaging.processing.suite2p.s2p_loader', 's2p_loader', ([], {'s2p_path': 's2p_path'}), '(s2p_path=s2p_path)\n', (6126, 6145), False, 'from packerlabimaging.processing.suite2p import s2p_loader\n'), ((6282, 6378), 'packerlabimaging.utils.io.import_obj', 'import_obj', ([], {'pkl_path': '"""/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl"""'}), "(pkl_path=\n '/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')\n", (6292, 6378), False, 'from packerlabimaging.utils.io import import_obj\n'), ((6538, 6629), 'packerlabimaging.utils.io.import_obj', 'import_obj', ([], 
{'pkl_path': '"""/home/pshah/mnt/qnap/Analysis/2021-01-25/PS12/PS12_analysis.pkl"""'}), "(pkl_path=\n '/home/pshah/mnt/qnap/Analysis/2021-01-25/PS12/PS12_analysis.pkl')\n", (6548, 6629), False, 'from packerlabimaging.utils.io import import_obj\n'), ((6897, 6911), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6909, 6911), True, 'import pandas as pd\n'), ((6931, 6992), 'numpy.random.choice', 'np.random.choice', (["['day 1', 'day 2', 'day 4', 'day 8']", 'n_obs'], {}), "(['day 1', 'day 2', 'day 4', 'day 8'], n_obs)\n", (6947, 6992), True, 'import numpy as np\n'), ((7013, 7065), 'numpy.random.choice', 'np.random.choice', (["['day 3', 'day 5', 'day 7']", 'n_obs'], {}), "(['day 3', 'day 5', 'day 7'], n_obs)\n", (7029, 7065), True, 'import numpy as np\n'), ((7624, 7664), 'pandas.DataFrame', 'pd.DataFrame', (['var_group'], {'index': 'var_names'}), '(var_group, index=var_names)\n', (7636, 7664), True, 'import pandas as pd\n'), ((7935, 8031), 'packerlabimaging.utils.io.import_obj', 'import_obj', ([], {'pkl_path': '"""/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl"""'}), "(pkl_path=\n '/home/pshah/Documents/code/packerlabimaging/tests/RL109_analysis.pkl')\n", (7945, 8031), False, 'from packerlabimaging.utils.io import import_obj\n'), ((7393, 7463), 'numpy.random.choice', 'np.random.choice', (["['group A', 'group B', 'group C', 'group D']", 'n_vars'], {}), "(['group A', 'group B', 'group C', 'group D'], n_vars)\n", (7409, 7463), True, 'import numpy as np\n'), ((7497, 7567), 'numpy.random.choice', 'np.random.choice', (["['group A', 'group B', 'group C', 'group D']", 'n_vars'], {}), "(['group A', 'group B', 'group C', 'group D'], n_vars)\n", (7513, 7567), True, 'import numpy as np\n'), ((7783, 7815), 'numpy.random.random', 'np.random.random', (['(n_obs * n_vars)'], {}), '(n_obs * n_vars)\n', (7799, 7815), True, 'import numpy as np\n'), ((8216, 8268), 'numpy.random.choice', 'np.random.choice', (["['A', 'B', 'C']", 'trialobj.n_frames'], {}), "(['A', 
'B', 'C'], trialobj.n_frames)\n", (8232, 8268), True, 'import numpy as np\n'), ((8291, 8330), 'numpy.arange', 'np.arange', (['trialobj.n_frames'], {'dtype': 'int'}), '(trialobj.n_frames, dtype=int)\n', (8300, 8330), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""SHERIFS
Seismic Hazard and Earthquake Rates In Fault Systems
Version 1.2
@author: <NAME>
"""
import numpy as np
class bg():
    """
    Extract the geometry and properties of the background.
    """

    @staticmethod
    def geom(model_name, file_geom):
        """Read the background polygon (lon/lat) for a given model.

        Args:
            model_name (str): name of the model whose rows should be selected.
            file_geom (str): path to the background geometry file; one header
                line followed by whitespace-separated columns: model lon lat.

        Returns:
            tuple: (Lon_bg, Lat_bg), lists of floats for the selected model.
        """
        # manually defined in the file Background geometry
        geom_bg = np.genfromtxt(file_geom, dtype=[('U100'), ('f8'), ('f8')], skip_header=1)
        column_model = list(map(lambda i: geom_bg[i][0], range(len(geom_bg))))
        index_model = np.where(np.array(column_model) == model_name)[0]
        Lon_bg = list(map(lambda i: geom_bg[i][1], index_model))
        Lat_bg = list(map(lambda i: geom_bg[i][2], index_model))
        # BUG FIX: the original tested ``Lon_bg == 0`` (a list compared to an
        # int), which is always False; an empty selection is the real error.
        if not Lon_bg:
            print('Error!! Check your input background geometry')
        return Lon_bg, Lat_bg

    @staticmethod
    def prop(model_name, file_prop):
        """Read the background seismicity properties for a given model.

        Args:
            model_name (str): name of the model to select lines for.
            file_prop (str): path to the tab-separated properties file.

        Returns:
            tuple: (upperSeismoDepth, lowerSeismoDepth, ruptAspectRatio,
            nodalPlanes, hypoDepths).
        """
        # Use a context manager so the file handle is not leaked (the original
        # opened the file without closing it).
        with open(file_prop, 'r') as f:
            prop_bg = f.readlines()
        # background general parameters read from the input file
        nodalPlanes = []
        hypoDepths = []
        for line in prop_bg:
            fields = line.split('\t')
            if fields[0] == model_name:
                if fields[1] == 'upperSeismoDepth':
                    upperSeismoDepth = float(fields[2])
                if fields[1] == 'lowerSeismoDepth':
                    lowerSeismoDepth = float(fields[2])
                if fields[1] == 'ruptAspectRatio':
                    ruptAspectRatio = float(fields[2])
                if fields[1] == 'nodalPlane':
                    nodalPlanes.append([float(fields[2]),
                                        float(fields[3]),
                                        float(fields[4]),
                                        float(fields[5])])
                if fields[1] == 'hypoDepth':
                    hypoDepths.append([float(fields[2]),
                                       float(fields[3])])
        # BUG FIX: the original tested ``len(str(nodalPlanes)) == 0``, which
        # can never be true (str([]) is '[]'); test the list itself instead.
        if not nodalPlanes:
            print('Error!! Verify your Background parameters file')
        return upperSeismoDepth, lowerSeismoDepth, ruptAspectRatio, nodalPlanes, hypoDepths
| [
"numpy.array",
"numpy.genfromtxt"
] | [((404, 471), 'numpy.genfromtxt', 'np.genfromtxt', (['file_geom'], {'dtype': "['U100', 'f8', 'f8']", 'skip_header': '(1)'}), "(file_geom, dtype=['U100', 'f8', 'f8'], skip_header=1)\n", (417, 471), True, 'import numpy as np\n'), ((598, 620), 'numpy.array', 'np.array', (['column_model'], {}), '(column_model)\n', (606, 620), True, 'import numpy as np\n')] |
# -*- coding: future_fstrings -*-
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pathlib
# Column names of an ARES output file (one row per measured absorption line).
_COLS = ('wavelength', 'nlines', 'depth', 'fwhm',
         'EW', 'EWerr', 'amplitude', 'sigma', 'mean')
class ARES:
    """Wrapper around the external ARES equivalent-width code.

    Builds a ``mine.opt`` option file from keyword arguments and invokes the
    ``ARES`` binary via the shell.
    """

    def __init__(self, *arg, **kwargs):
        self._config_created = False
        self.kwargs = kwargs
        self._create_config()
        # Where the ARES binary will write its results.
        self.outputfile = self.kwargs.get('fileout', 'test.ares')

    @classmethod
    def from_config(cls, fname):
        """Create an ARES instance from an existing option file.

        Each line of *fname* must have the form ``key = value``.
        """
        with open(fname) as lines:
            kwargs = dict()
            for line in lines:
                line = line.split('=')
                line = list(map(lambda s: s.replace(' ', ''), line))
                # BUG FIX: the original stored ``kwargs[1]`` (a KeyError)
                # instead of the parsed value; also strip the trailing newline.
                kwargs[line[0]] = line[1].strip()
        return cls(**kwargs)

    def _create_config(self):
        """Write the ``mine.opt`` option file consumed by the ARES binary."""
        fout = f"specfits='{self.kwargs.get('specfits')}'\n"
        fout += f"readlinedat='{self.kwargs.get('readlinedat', 'cdo.dat')}'\n"
        fout += f"fileout='{self.kwargs.get('fileout', 'aresout.dat')}'\n"
        fout += f"lambdai={self.kwargs.get('lambdai', 3000)}\n"
        fout += f"lambdaf={self.kwargs.get('lambdaf', 8000)}\n"
        fout += f"smoothder={self.kwargs.get('smoothder', 6)}\n"
        fout += f"space={self.kwargs.get('space', 3.0)}\n"
        fout += f"rejt={self.kwargs.get('rejt', '3;5764,5766,6047,6052,6068,6076')}\n"
        fout += f"lineresol={self.kwargs.get('lineresol', 0.05)}\n"
        fout += f"miniline={self.kwargs.get('miniline', 2)}\n"
        fout += f"plots_flag={self.kwargs.get('plots_flag', 0)}\n"
        fout += f"rvmask='{self.kwargs.get('rvmask', '3,6021.8,6024.06,6027.06,6024.06,20')}'\n"
        with open('mine.opt', 'w') as f:
            f.writelines(fout)
        self._config_created = True

    def run(self, verbose=False):
        """Run the ARES binary, writing the config first if necessary."""
        if not self._config_created:
            self._create_config()
        if verbose:
            print('Running ARES...')
        os.system('ARES > /dev/null')
        if verbose:
            print(f'Done! Result saved in {self.kwargs.get("fileout", "aresout.dat")}')

    @staticmethod
    def read_output(fname):
        """Parse an ARES result file into an ARESOutput object."""
        return ARESOutput(fname)

    @staticmethod
    def get_rv(fname='logARES.txt'):
        """Parse the radial velocity from an ARES log file.

        Scans for the first line starting with 'Velocidade radial' and
        returns the value after the last ':' as a float.
        """
        with open(fname, 'r') as lines:
            for line in lines:
                if line.startswith('Velocidade radial'):
                    break
        rv = line.rpartition(':')[-1]
        return float(rv)
class ARESOutput:
    """An ARES result table loaded into a DataFrame, with helpers for
    comparing two runs (percent difference, MSE) and quick plotting."""

    def __init__(self, fname, *args, **kwargs):
        self.fname = fname
        frame = pd.read_csv(self.fname, sep=r'\s+', header=None)
        frame.columns = _COLS
        self.df = frame
        # df.set_index('wavelength', inplace=True)

    @staticmethod
    def _check_other(other):
        # Shared guard: comparisons only make sense against another ARESOutput.
        if not isinstance(other, ARESOutput):
            raise TypeError(f'other is of type {type(other)} which is not compatible for this method')

    @staticmethod
    def _check_col(col):
        # Shared guard: only the known output columns are valid.
        if col not in _COLS:
            raise ValueError(f'The following columns are allowed: {_COLS}')

    def percent_diff(self, other, col):
        """Percent difference between this output and *other* for *col*.

        Computed as (self.df.col - other.df.col) / self.df.col * 100.

        Input
        -----
        other : ARESOutput object
            The result from another spectrum
        col : str
            The col for which to calculate the difference
        """
        self._check_other(other)
        self._check_col(col)
        diff = (self.df[col] - other.df[col]) / self.df[col] * 100
        return diff.values

    def plot(self, col1, col2=None, *args, **kwargs):
        """Scatter-plot *col2* against *col1* (or *col1* against wavelength)."""
        if col2 is None:
            col1, col2 = 'wavelength', col1
        if col1 not in _COLS or col2 not in _COLS:
            raise ValueError(f'The following columns are allowed: {_COLS}')
        plt.plot(self.df[col1], self.df[col2], 'o', *args, **kwargs)
        plt.xlabel(col1)
        plt.ylabel(col2)

    def mse(self, other, col):
        """Root-sum-of-squares difference with *other* for *col*, scaled by 1/N."""
        self._check_other(other)
        self._check_col(col)
        N = max((len(self.df), len(other.df)))
        joined = self.df.join(other.df, how='outer', lsuffix='_l', rsuffix='_r')
        return 1/N * np.sqrt((np.sum((joined[col+'_l'] - joined[col+'_r'])**2)))
def get_result(*args, **kwargs):
    """Return the ARES output for this configuration, running ARES on demand.

    If the output file named by ``fileout`` already exists, it is parsed
    directly; otherwise ARES is run first.
    """
    fileout = kwargs.get('fileout')
    if not os.path.exists(fileout):
        runner = ARES(*args, **kwargs)
        runner.run(verbose=True)
    return ARES.read_output(fileout)
def create_fname(spectrum, smoothder, space, star='sun'):
    """Build the ARES output path for a spectrum/parameter combination.

    Args:
        spectrum: Spectrum file name; the instrument is inferred from it.
        smoothder: ARES 'smoothder' parameter used in the run.
        space: ARES 'space' parameter used in the run.
        star: Target-star sub-folder (default 'sun').

    Returns:
        str: ../data/ARES/<INSTRUMENT>/<star>/smooth<smoothder>_space<space>.ares

    Raises:
        ValueError: if the instrument cannot be inferred from *spectrum*.
    """
    name = spectrum.lower()
    if 'espresso' in name:
        folder = 'ESPRESSO'
    elif 'pepsi' in name:
        # BUG FIX: was 'PEPSE', which never matched the 'PEPSI' directory
        # created by setup_dirs().
        folder = 'PEPSI'
    elif 'harps' in name:
        folder = 'HARPS'
    else:
        # Previously this fell through and raised UnboundLocalError.
        raise ValueError(f'Cannot infer instrument from {spectrum!r}')
    return f'../data/ARES/{folder}/{star}/smooth{smoothder}_space{space}.ares'
def setup_dirs():
    """Create the per-instrument ARES output folders if they do not exist."""
    for instrument in ('ESPRESSO', 'PEPSI', 'HARPS'):
        pathlib.Path(f'../data/ARES/{instrument}/sun').mkdir(parents=True, exist_ok=True)
if __name__ == "__main__":
    setup_dirs()
    # Spectra to process and the ARES parameter grid to sweep over.
    spectra = ('../daniel/sun_espresso_s1d_rv.fits', '../daniel/sun_pepsi_1d_rv.fits')
    smoothders = range(1, 10)
    spaces = np.arange(1, 8, 0.1)
    for spectrum in spectra:
        for smoothder in smoothders:
            for space in spaces:
                # One ARES run per (spectrum, smoothder, space) combination;
                # get_result skips combinations whose output already exists.
                config = {'specfits': spectrum,
                          'readlinedat': '../daniel/linelist_damp.rdb',
                          'fileout': create_fname(spectrum, smoothder, space),
                          'smoothder': smoothder,
                          'space': space}
output = get_result(**config) | [
"numpy.sum",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"os.system",
"pathlib.Path",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((5449, 5469), 'numpy.arange', 'np.arange', (['(1)', '(8)', '(0.1)'], {}), '(1, 8, 0.1)\n', (5458, 5469), True, 'import numpy as np\n'), ((1990, 2019), 'os.system', 'os.system', (['"""ARES > /dev/null"""'], {}), "('ARES > /dev/null')\n", (1999, 2019), False, 'import os\n'), ((2615, 2663), 'pandas.read_csv', 'pd.read_csv', (['self.fname'], {'sep': '"""\\\\s+"""', 'header': 'None'}), "(self.fname, sep='\\\\s+', header=None)\n", (2626, 2663), True, 'import pandas as pd\n'), ((3797, 3857), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df[col1]', 'self.df[col2]', '"""o"""', '*args'], {}), "(self.df[col1], self.df[col2], 'o', *args, **kwargs)\n", (3805, 3857), True, 'import matplotlib.pyplot as plt\n'), ((3867, 3883), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['col1'], {}), '(col1)\n', (3877, 3883), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3909), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['col2'], {}), '(col2)\n', (3903, 3909), True, 'import matplotlib.pyplot as plt\n'), ((5033, 5074), 'pathlib.Path', 'pathlib.Path', (['"""../data/ARES/ESPRESSO/sun"""'], {}), "('../data/ARES/ESPRESSO/sun')\n", (5045, 5074), False, 'import pathlib\n'), ((5115, 5153), 'pathlib.Path', 'pathlib.Path', (['"""../data/ARES/PEPSI/sun"""'], {}), "('../data/ARES/PEPSI/sun')\n", (5127, 5153), False, 'import pathlib\n'), ((5194, 5232), 'pathlib.Path', 'pathlib.Path', (['"""../data/ARES/HARPS/sun"""'], {}), "('../data/ARES/HARPS/sun')\n", (5206, 5232), False, 'import pathlib\n'), ((4359, 4405), 'numpy.sum', 'np.sum', (["((df[col + '_l'] - df[col + '_r']) ** 2)"], {}), "((df[col + '_l'] - df[col + '_r']) ** 2)\n", (4365, 4405), True, 'import numpy as np\n')] |
import copy
import pickle
import sys
from pathlib import Path
from sklearn.cluster import KMeans, DBSCAN
import numpy as np
from skimage import io
import mayavi.mlab as mlab
import os
# Pin this process to GPU 3; must be set before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
from ..utils import (
    box_utils,
    calibration_kitti,
    common_utils,
    object3d_kitti,
    point_box_utils,
)
from .dataset import DatasetTemplate
import torch
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Hard-coded path so `visualize_utils` (vu) resolves outside the package.
sys.path.append("/home/xharlie/dev/occlusion_pcd/tools/visual_utils")
import visualize_utils as vu
from PIL import ImageColor
from ..ops.chamfer_distance import ChamferDistance
from ..ops.iou3d_nms import iou3d_nms_utils
# Single shared Chamfer-distance op, reused by cd_4pose and the matching loop.
chamfer_dist = ChamferDistance()
NUM_POINT_FEATURES = 4
def extract_allpnts(
    root_path=None, splits=["train", "val"], type="Car", apply_mirror=True
):
    """
    Collect the points of every object whose class matches `type` from
    `kitti_dbinfos_%s.pkl` for each split in `splits`.

    Returns:
        * `all_db_infos_lst`: list of the raw info dicts for the matched objects.
        * `box_dims_lst`: list of (0, 0, 0, dx, dy, dz, 0) arrays — each box size
          with its center moved to the origin and heading zeroed.
        * `pnts_lst`: list of normalized object points (heading rotated to 0,
          bottom 0.15 m trimmed).
        * `mirrored_pnts_lst`: same as `pnts_lst` but with y-mirrored points
          appended when `apply_mirror` is True.
    """
    all_db_infos_lst = []
    box_dims_lst = []
    pnts_lst = []
    mirrored_pnts_lst = []
    for split in splits:
        db_info_save_path = Path(root_path) / ("kitti_dbinfos_%s.pkl" % split)
        with open(db_info_save_path, "rb") as f:
            all_db_infos = pickle.load(f)[type]
        for k in range(len(all_db_infos)):
            info = all_db_infos[k]
            obj_type = info["name"]
            if obj_type != type:
                continue
            gt_box = info["box3d_lidar"]
            # box_dims_lst appends [0, 0, 0, dx, dy, dz, 0]
            box_dims_lst.append(
                np.concatenate(
                    [
                        np.zeros_like(gt_box[0:3]),
                        np.array(gt_box[3:6]),
                        np.zeros_like(gt_box[6:7]),
                    ],
                    axis=-1,
                )
            )
            all_db_infos_lst.append(info)
            # BUG FIX: Path objects do not support "+"; join with "/" as the
            # db_info_save_path above already does.
            obj_pnt_fpath = Path(root_path) / info["path"]
            car_pnts = get_normalized_cloud(str(obj_pnt_fpath), gt_box, bottom=0.15)[
                :, :3
            ]
            mirrored_car_pnts = mirror(car_pnts)
            pnts_lst.append(car_pnts)
            if apply_mirror:
                mirrored_pnts_lst.append(mirrored_car_pnts)
            else:
                mirrored_pnts_lst.append(car_pnts)
    return all_db_infos_lst, box_dims_lst, pnts_lst, mirrored_pnts_lst
def clustering(m_nm, num_cluster, box_dims_lst):
    """
    Cluster the training-split box dimensions into `num_cluster` groups.

    Args:
        m_nm: clustering method name, "kmeans" or "DBSCAN".
        num_cluster: number of clusters to form / to extract indices for.
        box_dims_lst: pair of arrays (train_box_dims, val_box_dims).
    Returns:
        (fitted clusterer, list of index arrays — one array of member indices
        per cluster label 0..num_cluster-1).
    """
    train_box_dims = box_dims_lst[0]
    if m_nm == "kmeans":
        clusterer = KMeans(n_clusters=num_cluster, random_state=1).fit(train_box_dims)
    elif m_nm == "DBSCAN":
        clusterer = DBSCAN(eps=0.3, min_samples=10).fit(train_box_dims)
    else:
        # Previously an unknown name fell through to a NameError on `clusterer`.
        raise ValueError("unknown clustering method: %s" % m_nm)
    # BUG FIX: the original referenced an undefined global `cluster_num`
    # (NameError); the parameter is `num_cluster`.
    indices = [
        np.asarray((clusterer.labels_ == i).nonzero())[0, :]
        for i in range(num_cluster)
    ]
    return clusterer, indices
def get_normalized_cloud(obj_pnt_fpath, gt_box, bottom=0.0):
    """
    Load an object's (N, 4) point file, rotate its xyz by -heading so the box is
    aligned to 0 degrees, and trim points below `bottom` meters above the box floor.
    """
    raw = np.fromfile(str(obj_pnt_fpath), dtype=np.float32).reshape([-1, 4])
    aligned_xyz = single_rotate_points_along_z(raw[:, :3], -gt_box[6])
    aligned = np.concatenate([aligned_xyz, raw[:, 3:]], axis=1)
    return remove_bottom(aligned, gt_box, bottom)
def remove_bottom(pnts, gt_box, bottom):
    """
    Drop points near the floor of the box.

    Args:
        pnts: lidar points, z in column 2.
        gt_box: 3D bounding box; gt_box[5] is the box height dz.
        bottom: height threshold in meters above the box floor.
    Returns:
        pnts whose z is strictly above -dz/2 + bottom; `pnts` unchanged
        when bottom == 0.0.
    """
    if bottom == 0.0:
        return pnts
    z_cutoff = bottom - gt_box[5] / 2
    return pnts[pnts[:, 2] > z_cutoff]
def vis_cluster(clusterer, box_dims, cluster_num):
    """
    Render the clustered box dimensions as one colored point set per cluster
    label (0..cluster_num-1) in a single mayavi scene.

    Args:
        clusterer: fitted sklearn clusterer; only `labels_` is read.
        box_dims: array of box-dimension vectors, one row per sample.
        cluster_num: number of cluster labels to draw; must not exceed the
            palette size (22 colors).
    """
    colors = [
        "#e6194b",
        "#3cb44b",
        "#ffe119",
        "#4363d8",
        "#f58231",
        "#911eb4",
        "#46f0f0",
        "#f032e6",
        "#bcf60c",
        "#fabebe",
        "#008080",
        "#e6beff",
        "#9a6324",
        "#fffac8",
        "#800000",
        "#aaffc3",
        "#808000",
        "#ffd8b1",
        "#000075",
        "#808080",
        "#ffffff",
        "#000000",
    ]
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    #
    # # For each set of style and range settings, plot n random points in the box
    # # defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].
    # for i in range(box_dims.shape[0]):
    #     xs = box_dims[i,0]
    #     ys = box_dims[i,1]
    #     zs = box_dims[i,2]
    #     ax.scatter(xs, ys, zs, c=colors[clusterer.labels_[i]])
    #
    # ax.set_xlabel('X Label')
    # ax.set_ylabel('Y Label')
    # ax.set_zlabel('Z Label')
    # ax.set_aspect('equal')
    #
    # plt.show()
    # Boolean membership mask per cluster, then the member rows per cluster.
    binary = [clusterer.labels_ == i for i in range(cluster_num)]
    box_pnt_lst = [box_dims[binary[i]] for i in range(cluster_num)]
    # Hex palette entries converted to (r, g, b) tuples in [0, 1] for mayavi.
    colors_lst = [
        tuple(np.array(ImageColor.getcolor(colors[i], "RGB")) / 255.0)
        for i in range(cluster_num)
    ]
    size_lst = [0.02 for i in range(cluster_num)]
    mode_lst = ["sphere" for i in range(cluster_num)]
    # vu.draw_scenes_multi(box_pnt_lst, colors_lst, size_lst, mode_lst, bgcolor=(1,1,1))
    vu.draw_scenes_multi(box_pnt_lst, colors_lst, size_lst, mode_lst, bgcolor=(1, 1, 1))
    axes = mlab.axes()
    mlab.show()
def save_pnts_box(pnts, box, name, path):
    """Pickle a {name, points, box} record for one object to `path`."""
    record = {"name": name, "points": pnts, "box": box}
    with open(path, "wb") as fh:
        pickle.dump(record, fh)
def find_overlaps(base, aug):
    """
    Return a boolean mask over `aug`, True where the value does NOT occur in
    `base` (i.e., True marks the non-overlapping entries to keep).
    """
    order = np.argsort(base)
    base_sorted = base[order]
    insert_pos = np.searchsorted(base_sorted, aug)
    candidate = np.take(order, insert_pos, mode="clip")
    return base[candidate] != aug
def coords3inds(coords, ny, nx):
    """
    Flatten (x, y, z) integer voxel coordinates into linear int32 indices,
    with x varying fastest, then y, then z (grid is nx wide, ny deep).
    """
    flat = coords[:, 2] * (nx * ny) + coords[:, 1] * nx + coords[:, 0]
    return flat.astype(np.int32)
def mirror(pnts, lastchannel=3):
    """
    Mirror `pnts` across the y-axis and drop mirrored points lying within
    0.05 m of an original point.

    Returns:
        Original points with the surviving mirrored points appended along axis 0.
    """
    flipped = pnts[..., :lastchannel].copy()
    flipped[..., 1] = -flipped[..., 1]
    flipped = remove_voxelpnts(pnts, flipped, nearest_dist=0.05)
    return np.concatenate([pnts, flipped], axis=0)
def batch_vis_pair(template, temp_box, pnts_lst, gt_box_arry, ranks):
    """
    Lay each cloud in `pnts_lst` (visited in `ranks` order) on a 2D grid, each
    next to a shifted copy of `template`, and render templates vs. clouds in two
    colors with mayavi. Box centers are moved to the grid cells and headings
    are zeroed. Mutates `gt_box_arry` in place.
    """
    # Palette is hoisted out of the loop: it was rebuilt every iteration and is
    # also referenced after the loop ends.
    colors = [
        "#e6194b",
        "#4363d8",
        "#3cb44b",
        "#ffe119",
        "#f58231",
        "#911eb4",
        "#46f0f0",
        "#f032e6",
        "#bcf60c",
        "#fabebe",
        "#008080",
        "#e6beff",
        "#9a6324",
        "#fffac8",
        "#800000",
        "#aaffc3",
        "#808000",
        "#ffd8b1",
        "#000075",
        "#808080",
        "#ffffff",
        "#000000",
    ]
    moved_temp_lst = []
    moved_pnts_lst = []
    temp_box = np.tile(temp_box, [len(pnts_lst), 1])
    temp_box[:, -1] = np.zeros_like(temp_box[:, -1])
    gt_box_arry[:, -1] = np.zeros_like(gt_box_arry[:, -1])
    # Grid layout: roughly sqrt(N) x sqrt(N), stretched 1.2x horizontally.
    width = int(np.ceil(np.sqrt(len(pnts_lst))) * 1.2)
    height = int(np.ceil(np.sqrt(len(pnts_lst))) / 1.2)
    x = (np.arange(height) - height // 2) * 6
    y = (np.arange(width) - width // 2) * 4
    xv, yv = np.meshgrid(x, y, indexing="ij")
    xv, yv = xv.reshape(-1), yv.reshape(-1)
    for ind in range(len(pnts_lst)):
        i = ranks[ind]
        # BUG FIX: dtype=np.float was removed in NumPy >= 1.24; float is the
        # documented replacement with identical semantics.
        shift = np.array([[xv[ind], yv[ind], 0]], dtype=float)
        temp_box[i, :3], gt_box_arry[i, :3] = shift[0], shift[0]
        moved_pnts_lst.append(pnts_lst[i] + shift)
        moved_temp_lst.append(template + shift)
    moved_temp_pnt = np.concatenate(moved_temp_lst, axis=0)
    moved_pnts = np.concatenate(moved_pnts_lst, axis=0)
    # Chunk huge clouds into <=200k-point pieces so mayavi can render them.
    tmp_section = len(moved_temp_pnt) // 200000 + 1
    pnt_section = len(moved_pnts) // 200000 + 1
    render_pnts_lst = [
        moved_temp_pnt[i * 200000 : (i + 1) * 200000] for i in range(tmp_section)
    ] + [moved_pnts[i * 200000 : (i + 1) * 200000] for i in range(pnt_section)]
    colors_lst = [
        tuple(np.array(ImageColor.getcolor(colors[0], "RGB")) / 255.0)
        for i in range(tmp_section)
    ] + [
        tuple(np.array(ImageColor.getcolor(colors[1], "RGB")) / 255.0)
        for i in range(pnt_section)
    ]
    size_lst = [0.02 for i in range(tmp_section)] + [0.04 for i in range(pnt_section)]
    mode_lst = ["sphere" for i in range(tmp_section)] + [
        "sphere" for i in range(pnt_section)
    ]
    vu.draw_scenes_multi(
        render_pnts_lst, colors_lst, size_lst, mode_lst, bgcolor=(1, 1, 1)
    )
    mlab.show()
def vis_pair(template, temp_box, pnts, pnts_box):
    """
    Show `template` and `pnts` one after another in separate mayavi scenes,
    each as colored spheres on a white background. When both boxes are given,
    their centers and headings are zeroed in place (the boxes are currently not
    passed to the renderer).
    """
    if temp_box is not None and pnts_box is not None:
        temp_box[:, :3] = np.zeros_like(temp_box[:, :3])
        temp_box[:, -1] = np.zeros_like(pnts_box[:, -1])
        pnts_box[:, :3] = np.zeros_like(pnts_box[:, :3])
        pnts_box[:, -1] = np.zeros_like(pnts_box[:, -1])
    # Only the first two palette entries are ever used: red for the template,
    # blue for the target cloud.
    palette = ("#e6194b", "#4363d8")
    rgb_pair = [
        tuple(np.array(ImageColor.getcolor(hex_color, "RGB")) / 255.0)
        for hex_color in palette
    ]
    for cloud, rgb in zip((template, pnts), rgb_pair):
        vu.draw_scenes_multi(
            [cloud],
            [rgb],
            [0.02],
            ["sphere"],
            bgcolor=(1, 1, 1),
            gt_boxes=None,
            ref_boxes=None,
        )
    mlab.show()
def cd_4pose(scene, template):
    """
    One-directional Chamfer distance from `scene` to `template` (both
    n_points x 3), returned as per-point L1 distances (sqrt of the squared
    nearest-neighbor distances).
    """
    sq_dist, _ = chamfer_dist(toTensor(scene), toTensor(template))
    return torch.sqrt(sq_dist)
def single_rotate_points_along_z(points, angle):
    """
    Rotate (N, 3+C) points about the z-axis by `angle` radians (x rotates
    toward y). Only the xyz columns are used; the result is (N, 3).
    """
    c, s = np.cos(angle), np.sin(angle)
    zero, one = np.zeros_like(angle), np.ones_like(angle)
    rotation = np.stack(
        (c, s, zero, -s, c, zero, zero, zero, one), axis=0
    ).reshape(3, 3)
    return points[:, 0:3] @ rotation
def get_iou(box_tensor, box_dims_lst):
    """
    Return the pairwise 3D IoU of each box with all other boxes, shape NxN
    where N = len(box_dims_lst). Rows are computed in chunks to bound GPU
    memory use.
    """
    limit = len(box_dims_lst)
    # BUG FIX: the old loop used `limit // 10` as the chunk size over a fixed
    # range(11); for limit < 10 every chunk was empty and an empty tensor was
    # returned. A while-loop with a chunk of at least 1 covers every row.
    chunk = max(1, limit // 10)
    iou3d_lst = []
    start = 0
    while start < limit:
        end = min(start + chunk, limit)
        iou3d_lst.append(
            iou3d_nms_utils.boxes_iou3d_gpu(box_tensor[start:end, :], box_tensor)
        )
        start = end
    iou3d = torch.cat(iou3d_lst, dim=0)
    print("iou3d", iou3d.shape)
    return iou3d
def padding_pnt_tensors(pnts_lst, max_num_pnts=None, num_pnts_arry=None):
    """
    Zero-pad every point set in `pnts_lst` to `max_num_pnts` points and build
    the matching masks.

    Args:
        pnts_lst: list of (n_i, 3) point sets (torch CUDA tensors or numpy arrays).
        max_num_pnts: common padded length.
        num_pnts_arry: per-entry real point counts n_i.
    Returns:
        (padded points stacked to one CUDA tensor,
         mask: 1.0 on real points / 0.0 on padding,
         reversemask: 0.0 on real points / 10.0 on padding — pushes padded
         slots out of min/max reductions,
         point counts as a CUDA tensor).
    """
    pnts_padding_lst = []
    mask_lst = []
    reversemask_lst = []
    for i in range(len(pnts_lst)):
        pad_len = max_num_pnts - num_pnts_arry[i]
        if isinstance(pnts_lst[i], torch.Tensor):
            padding_pnts = torch.cat(
                [
                    pnts_lst[i],
                    torch.zeros([pad_len, 3], dtype=torch.float, device="cuda"),
                ],
                dim=0,
            )
        else:
            padding_pnts = np.concatenate(
                [pnts_lst[i], np.zeros([pad_len, 3])], axis=0
            )
        # BUG FIX: dtype=np.float was removed in NumPy >= 1.24; the builtin
        # float is the documented, semantically identical replacement.
        mask = np.concatenate(
            [
                np.ones([num_pnts_arry[i]], dtype=float),
                np.zeros([pad_len], dtype=float),
            ]
        )
        reversemask = np.concatenate(
            [
                np.zeros([num_pnts_arry[i]], dtype=float),
                10.0 * np.ones([pad_len], dtype=float),
            ]
        )
        pnts_padding_lst.append(padding_pnts)
        mask_lst.append(mask)
        reversemask_lst.append(reversemask)
    if isinstance(pnts_padding_lst[0], torch.Tensor):
        pnts_padding_tensor = torch.stack(pnts_padding_lst, dim=0)
    else:
        pnts_padding_tensor = toTensor(np.array(pnts_padding_lst))
    mask_tensor = toTensor(np.array(mask_lst))
    reversemask_tensor = toTensor(np.array(reversemask_lst))
    num_pnts_tensor = toTensor(num_pnts_arry)
    return pnts_padding_tensor, mask_tensor, reversemask_tensor, num_pnts_tensor
def toTensor(sample):
    """Convert a numpy array to a float32 tensor on the CUDA device."""
    as_float = torch.from_numpy(sample).float()
    return as_float.to("cuda")
def get_padding_boxpnts_tensors(point_in_box_lst):
    """
    Pad the per-box point sets to the length of the largest one and build the
    matching masks via `padding_pnt_tensors`.

    Returns:
        (padded points tensor, mask tensor, reverse-mask tensor,
         point-count tensor, raw per-box point counts as a numpy array).
    """
    counts = [len(box_pnts) for box_pnts in point_in_box_lst]
    max_num_pnts = max(counts) if counts else 0
    num_pnts_array = np.array(counts)
    padded, mask, reversemask, count_tensor = padding_pnt_tensors(
        point_in_box_lst, max_num_pnts, num_pnts_array
    )
    return padded, mask, reversemask, count_tensor, num_pnts_array
def repeat_boxpoints_tensor(boxpoint_tensor, candidate_num):
    """
    Tile a per-box tensor `candidate_num` times along a new axis 1, then merge
    the first two axes, so each box entry is repeated candidate_num times in a
    row. Supports 1-D, 2-D, and 3-D inputs.
    """
    shape = list(boxpoint_tensor.shape)
    n_boxes = shape[0]
    ndim = boxpoint_tensor.dim()
    if ndim == 3:
        repeats = (1, candidate_num, 1, 1)
        out_shape = (n_boxes * candidate_num, shape[1], shape[2])
    elif ndim == 2:
        repeats = (1, candidate_num, 1)
        out_shape = (n_boxes * candidate_num, shape[1])
    else:
        repeats = (1, candidate_num)
        out_shape = (n_boxes * candidate_num,)
    expanded = torch.unsqueeze(boxpoint_tensor, dim=1).repeat(*repeats)
    return expanded.view(*out_shape)
def find_best_match_boxpnts(
    all_db_infos_lst,
    box_dims_lst,
    sorted_iou,
    pnt_thresh_best_iou_indices,
    mirrored_pnts_lst,
    pnts_lst,
    coords_num,
    occ_map,
    bm_dir,
    allrange,
    nx,
    ny,
    voxel_size,
    max_num_bm=5,
    num_extra_coords=2000,
    iou_thresh=0.84,
    ex_coords_ratio=10.0,
    nearest_dist=0.16,
    vis=False,
    save=False,
):
    """
    For every object, greedily merge best-matching points from other objects'
    boxes into it and (optionally) store the augmented cloud to
    `<bm_dir>/<image_idx>_<gt_idx>.pkl` as float32. Stored points have their
    heading normalized to 0.

    Parameters:
        * `all_db_infos_lst`: list of info dicts from `kitti_dbinfos_%s.pkl`
          (where %s is the split name); `image_idx`/`gt_idx` name the output file.
        * `box_dims_lst`: (M, 7), each row (0, 0, 0, dx, dy, dz, 0) — box sizes
          with center at origin and heading 0.
        * `sorted_iou`: sorted top-800 IoUs per object, shape (M, 800).
        * `pnt_thresh_best_iou_indices`: object indices of the top-800 IoU
          candidates per object, shape (M, 800).
        * `mirrored_pnts_lst`: M normalized (mirrored) point clouds.
        * `pnts_lst`: M normalized (un-mirrored) point clouds.
        * `coords_num`: occupied-voxel count per object, length M.
        * `occ_map`: per-object (nx, ny) occupancy maps.
        * `max_num_bm`: max merge rounds per object (default 5).
    :return: None — results are printed and optionally pickled.
    """
    for car_id in range(0, len(mirrored_pnts_lst)):
        cur_mirrored_pnts_lst = [mirrored_pnts_lst[car_id]]
        cur_pnts_lst = [pnts_lst[car_id]]
        print("pnt_thresh_best_iou_indices", pnt_thresh_best_iou_indices.shape)
        # might cause error if car_id exceeds 800
        picked_indices = tuple(pnt_thresh_best_iou_indices[car_id].cpu())
        selected_mirrored_pnts_lst = [mirrored_pnts_lst[i] for i in picked_indices]
        selected_pnts_lst = [pnts_lst[i] for i in picked_indices]
        # print("pnt_thresh_best_iou_indices[car_id]", pnt_thresh_best_iou_indices[car_id].shape, coords_num.shape)
        cur_occ_map = occ_map[car_id]
        # get occupancy map where the selected mirrored points for each top K IoU objects are filtered with the `car_id` box
        selected_occ_map = torch.stack(
            [
                torch.as_tensor(
                    space_occ_voxelpnts(
                        remove_outofbox(
                            selected_mirrored_pnts_lst[i], box_dims_lst[car_id]
                        ),
                        allrange,
                        nx,
                        ny,
                        voxel_size=voxel_size,
                    ),
                    device="cuda",
                    dtype=torch.int32,
                )
                for i in range(len(selected_mirrored_pnts_lst))
            ],
            dim=0,
        )  # M nx ny
        selected_sorted_iou, cur_box, selected_pnt_thresh_best_iou_indices = (
            sorted_iou[car_id],
            box_dims_lst[car_id],
            pnt_thresh_best_iou_indices[car_id],
        )
        # Greedy multi-round merge of candidate clouds into this object.
        bm_pnts, bm_coords_num = find_multi_best_match_boxpnts(
            selected_sorted_iou,
            cur_box,
            cur_mirrored_pnts_lst,
            cur_pnts_lst,
            selected_mirrored_pnts_lst,
            selected_pnts_lst,
            selected_pnt_thresh_best_iou_indices,
            cur_occ_map,
            selected_occ_map,
            max_num_bm=max_num_bm,
            num_extra_coords=num_extra_coords,
            iou_thresh=iou_thresh,
            ex_coords_ratio=ex_coords_ratio,
            nearest_dist=nearest_dist,
            vis=vis,
        )
        info = all_db_infos_lst[car_id]
        image_idx, gt_idx = str(int(info["image_idx"])), str(int(info["gt_idx"]))
        if save:
            with open(
                os.path.join(bm_dir, image_idx + "_" + gt_idx + ".pkl"), "wb"
            ) as f:
                pickle.dump(bm_pnts.astype(np.float32), f)
        print(
            "{}/{}: bm_vox_num {}, bm_pnt_num {} ".format(
                car_id, len(mirrored_pnts_lst), bm_coords_num, bm_pnts.shape[0]
            )
        )
def remove_outofbox(pnts, box):
    """
    Keep only the points lying inside the axis-aligned box centered at the
    origin, where box[3:6] holds the extents (dx, dy, dz).
    """
    half_extent = box[3:6] * 0.5
    inside = np.logical_and(pnts <= half_extent, pnts >= -half_extent)
    # A point is kept only when all three coordinates fall inside the box.
    keep = inside.all(axis=-1)
    return pnts[keep, :]
def get_batch_stats(dist, num_pnts_tensor, mask_arry, reversemask_arry):
    """
    Per-instance mean/min/max of masked distances.

    `mask_arry` zeroes padded slots; `reversemask_arry` (10.0 on padding)
    pushes those slots out of the min (by adding) and the max (by subtracting).
    Instances with zero points yield NaN means, which are replaced by 100.0.
    """
    valid = dist * mask_arry
    mean_instance = torch.sum(valid, dim=1) / num_pnts_tensor
    min_instance = torch.min(valid + reversemask_arry, dim=1)[0]
    max_instance = torch.max(valid - reversemask_arry, dim=1)[0]
    mean_instance[torch.isnan(mean_instance)] = 100.0
    return mean_instance, min_instance, max_instance
def find_multi_best_match_boxpnts(
    sorted_iou,
    gt_box,
    cur_mirrored_pnts_lst,
    cur_pnts_lst,
    picked_mirrored_pnts_lst,
    picked_pnts_lst,
    selected_indices,
    cur_occ_map,
    selected_occ_map,
    max_num_bm=5,
    num_extra_coords=2000,
    iou_thresh=0.84,
    ex_coords_ratio=10.0,
    nearest_dist=0.16,
    vis=False,
):
    """
    Greedily merge up to `max_num_bm` candidate clouds into the current
    object's cloud.

    Each round scores every remaining candidate by a heuristic combining its
    Chamfer max-distance to the current cloud, the inverse of how many new
    occupancy voxels it would contribute, and penalties for IoU below
    `iou_thresh` or contributing fewer than 30 new voxels. The best candidate's
    non-duplicate points (farther than `nearest_dist` from existing points) are
    appended; merging stops once `num_extra_coords` new voxels are covered or
    no acceptable candidate remains.

    Returns:
        (merged points as a numpy array, number of occupied voxels after
        merging — 0 if nothing was merged).
    """
    gt_boxnum = len(cur_mirrored_pnts_lst)
    (
        box_pnts_padding_tensor,
        box_mask_tensor,
        box_reversemask_tensor,
        box_num_pnts_tensor,
        box_num_pnts_array,
    ) = get_padding_boxpnts_tensors(cur_mirrored_pnts_lst)
    (
        mirr_box_pnts_padding_tensor,
        mirr_box_mask_tensor,
        mirr_box_reversemask_tensor,
        mirr_box_num_pnts_tensor,
        mirr_box_num_pnts_array,
    ) = get_padding_boxpnts_tensors(picked_mirrored_pnts_lst)
    candidate_num, num_max_template_points, point_dims = list(
        mirr_box_pnts_padding_tensor.shape
    )
    # Shift candidate padding slots far away (+10) so they never become a
    # nearest neighbor in the Chamfer computation.
    mirr_box_reversemask_tensor_remote = mirr_box_pnts_padding_tensor + torch.unsqueeze(
        mirr_box_reversemask_tensor, dim=-1
    )
    box_pnts_padding_tensor = repeat_boxpoints_tensor(
        box_pnts_padding_tensor, candidate_num
    )
    box_num_pnts_tensor = repeat_boxpoints_tensor(box_num_pnts_tensor, candidate_num)
    box_mask_tensor = repeat_boxpoints_tensor(box_mask_tensor, candidate_num)
    box_reversemask_tensor = repeat_boxpoints_tensor(
        box_reversemask_tensor, candidate_num
    )
    if box_pnts_padding_tensor.shape[-2] > 0:
        dist1, _ = chamfer_dist(
            box_pnts_padding_tensor, mirr_box_reversemask_tensor_remote
        )  # candidate_num X max num pnt X 3
        dist_l1 = torch.sqrt(dist1)
        # print("dist_l1", dist_l1.shape, mirr_box_pnts_padding_tensor.shape)
        mean_instance, min_instance, max_instance = get_batch_stats(
            dist_l1, box_num_pnts_tensor, box_mask_tensor, box_reversemask_tensor
        )
        mean_instance = mean_instance.view(gt_boxnum, candidate_num)
        # min_instance = min_instance.view(gt_boxnum, candidate_num)
        max_instance = max_instance.view(gt_boxnum, candidate_num)
    else:
        # Current object has no points: distances are all zero, so the
        # heuristic is driven purely by voxel gain and IoU.
        mean_instance = torch.zeros(
            [gt_boxnum, candidate_num], device="cuda", dtype=torch.float32
        )
        max_instance = mean_instance.clone()
    aug_map = cur_occ_map
    bm_pnts = cur_mirrored_pnts_lst[0]
    oneside_bm_pnts = cur_pnts_lst[0]
    aug_coords_num = 0
    for round in range(max_num_bm):
        extra_coord_nums = extra_occ(aug_map, selected_occ_map)
        # Lower heuristic = better candidate.
        heuristic = (
            max_instance
            + ex_coords_ratio / extra_coord_nums.unsqueeze(0)
            + (sorted_iou.unsqueeze(0) < iou_thresh) * 2.0
            + (extra_coord_nums.unsqueeze(0) < 30) * 1.0
        )  # mean_instance + 10. / extra_coord_nums + (sorted_iou < 0.84) * 1.0 #
        min_heur_sorted, min_heur_indices = torch.min(heuristic, dim=1)
        bm_iou, bm_match_car_ind, bm_extra_vox_num, bm_match_occ_map = (
            sorted_iou[min_heur_indices],
            selected_indices[min_heur_indices],
            extra_coord_nums[min_heur_indices],
            selected_occ_map[min_heur_indices, ...],
        )
        # Stop when the best remaining candidate is below the IoU bar (unless
        # the object has no points yet) or contributes no new voxels.
        if (
            bm_iou.cpu() < iou_thresh and bm_pnts.shape[0] > 0
        ) or bm_extra_vox_num.cpu() == 0:
            break
        ind = min_heur_indices.cpu().item()
        # Drop candidate points that nearly coincide with existing ones.
        added_pnts = remove_voxelpnts(
            bm_pnts, picked_mirrored_pnts_lst[ind], nearest_dist=nearest_dist
        )
        if vis:
            # vis_pair(added_pnts, None, bm_pnts, np.expand_dims(gt_box, axis=0))
            vis_pair(
                picked_mirrored_pnts_lst[ind],
                None,
                bm_pnts,
                np.expand_dims(gt_box, axis=0),
            )
            vis_pair(
                picked_pnts_lst[ind],
                None,
                oneside_bm_pnts,
                np.expand_dims(gt_box, axis=0),
            )
        # Accept the merge only if it adds more than 4 genuinely new points.
        if added_pnts.shape[0] > 4:
            bm_pnts = np.concatenate([bm_pnts, added_pnts], axis=0)
            aug_map = aug_map | bm_match_occ_map
            aug_coords_num = torch.sum(aug_map).cpu()
            print(
                "added_pnts",
                bm_pnts.shape,
                added_pnts.shape,
                ind,
                picked_mirrored_pnts_lst[ind].shape,
                aug_coords_num,
                "bm_extra_vox_num",
                bm_extra_vox_num,
            )
        # Remove the consumed candidate from every per-candidate structure so
        # the next round cannot pick it again.
        if len(sorted_iou) == 1 or aug_coords_num >= num_extra_coords:
            break
        elif ind == len(sorted_iou) - 1:
            (
                sorted_iou,
                selected_indices,
                selected_occ_map,
                max_instance,
                mean_instance,
            ) = (
                sorted_iou[:ind],
                selected_indices[:ind],
                selected_occ_map[:ind],
                max_instance[:, :ind],
                mean_instance[:, :ind],
            )
        elif ind == 0:
            (
                sorted_iou,
                selected_indices,
                selected_occ_map,
                max_instance,
                mean_instance,
            ) = (
                sorted_iou[ind + 1 :],
                selected_indices[ind + 1 :],
                selected_occ_map[ind + 1 :],
                max_instance[:, ind + 1 :],
                mean_instance[:, ind + 1 :],
            )
        else:
            sorted_iou = torch.cat([sorted_iou[:ind], sorted_iou[ind + 1 :]], dim=0)
            selected_indices = torch.cat(
                [selected_indices[:ind], selected_indices[ind + 1 :]], dim=0
            )
            selected_occ_map = torch.cat(
                [selected_occ_map[:ind], selected_occ_map[ind + 1 :]], dim=0
            )
            max_instance = torch.cat(
                [max_instance[:, :ind], max_instance[:, ind + 1 :]], dim=1
            )
            mean_instance = torch.cat(
                [mean_instance[:, :ind], mean_instance[:, ind + 1 :]], dim=1
            )
    print("finish one ")
    return bm_pnts, aug_coords_num
def remove_voxelpnts(
    sourcepnts, target_pnts, voxel_size=np.array([0.08, 0.08, 0.08]), nearest_dist=None
):
    """
    Filter `target_pnts` against `sourcepnts`.

    If `nearest_dist` is given, drop every target point whose Chamfer distance
    to `sourcepnts` is <= `nearest_dist` and return the survivors. Otherwise,
    voxelize both clouds over their joint bounding range with `voxel_size` and
    drop target points whose voxel is already occupied by a source point.
    """
    augpnts = target_pnts[:, :3]
    gtpnts = sourcepnts[:, :3]
    if nearest_dist is None:
        # Per-axis minima/maxima of each cloud: (minx, miny, minz) / (maxx, maxy, maxz).
        min_gtpnts, max_gtpnts, min_augpnts, max_augpnts = (
            np.min(gtpnts, axis=0),
            np.max(gtpnts, axis=0),
            np.min(augpnts, axis=0),
            np.max(augpnts, axis=0),
        )
        # Joint bounding range of both clouds (renamed from `range`, which
        # shadowed the builtin).
        bounds = np.concatenate(
            [np.minimum(min_gtpnts, min_augpnts), np.maximum(max_gtpnts, max_augpnts)],
            axis=0,
        )
        gtpnts_ind = np.floor(
            (gtpnts - np.expand_dims(bounds[:3], axis=0))
            / np.expand_dims(voxel_size, axis=0)
        )
        augpnts_ind = np.floor(
            (augpnts - np.expand_dims(bounds[:3], axis=0))
            / np.expand_dims(voxel_size, axis=0)
        )
        # BUG FIX: np.int was removed in NumPy >= 1.24; the builtin int is the
        # documented replacement.
        nx, ny = np.ceil((bounds[3] - bounds[0]) / voxel_size[0]).astype(int), np.ceil(
            (bounds[4] - bounds[1]) / voxel_size[1]
        ).astype(int)
        mask = find_overlaps(
            coords3inds(gtpnts_ind, nx, ny), coords3inds(augpnts_ind, nx, ny)
        )
        # print("augpnts_ind", mask.shape, augpnts_ind.shape, augpnts_ind[mask].shape)
    else:
        dist_l1 = cd_4pose(
            np.expand_dims(augpnts, axis=0), np.expand_dims(gtpnts, axis=0)
        )
        mask = dist_l1.cpu().numpy()[0] > nearest_dist
    return target_pnts[mask]
def extra_occ(cur_occ_map, selected_occ_map):
    """
    For each candidate occupancy map, count the occupied cells that the
    current map does not already cover.

    Args:
        cur_occ_map: (nx, ny) 0/1 map of the current object.
        selected_occ_map: (candi_num, nx, ny) 0/1 maps of the candidates.
    Returns:
        length-candi_num tensor of newly-covered cell counts.
    """
    candi_num, nx, ny = list(selected_occ_map.shape)
    uncovered = (1 - cur_occ_map).view(1, nx, ny).repeat(candi_num, 1, 1)
    return (selected_occ_map * uncovered).sum(dim=(1, 2))
def space_occ_voxelpnts(sourcepnts, allrange, nx, ny, voxel_size=[0.08, 0.08, 0.08]):
    """
    Build an (nx, ny) int32 occupancy map over the x/y plane: a cell is 1 when
    at least one point falls inside it, else 0.

    Note: if `voxel_size` divides `allrange` exactly, a point sitting on the
    upper boundary indexes one cell past the map and raises an IndexError.
    """
    occmap = np.zeros([nx, ny], dtype=np.int32)
    if sourcepnts.shape[0] == 0:
        return occmap
    vsize = np.array(voxel_size)
    xyz = sourcepnts[:, :3]
    # Discretize each point into its voxel cell relative to the range origin.
    cell = np.floor(
        (xyz - np.expand_dims(allrange[:3], axis=0)) / np.expand_dims(vsize, axis=0)
    ).astype(int)
    occmap[cell[..., 0], cell[..., 1]] = 1
    return occmap
if __name__ == "__main__":
    """
    Overall description:
    1. Load all points for cars, pedestrian, and bike respectively
    2. The points are all normalized to have headings of 0 degree and with part of the bottom filtered
    3. IoU between each pair of boxes are calculated, with all center of the boxes moved to origin and heading being set to 0 degree.
    4. Occupation map for each object is calculated, with the map being the point's full range divided by voxel size.
    5. The occupation map is used to find how many voxels is being occupied by the object. If that value exceeds threshold, the IoU of its box will be sorted by high to low.
    """
    vis = False
    save = True  # False
    voxel_size = [0.16, 0.16, 0.16]
    obj_types = ["Car", "Cyclist", "Pedestrian"]
    # Per-class settings, aligned index-wise with obj_types.
    apply_mirror_lst = [True, True, False]
    PNT_THRESH_lst = [80, 5, 5]
    ex_coords_ratio_lst = [50, 5, 5]
    max_num_bm_lst = [2, 1, 1]
    nearest_dist_lst = [0.10, 0.05, 0.05]
    iou_thresh_lst = [0.90, 0.90, 0.90]
    num_extra_coords_lst = [2000, 2000, 2000]
    for i, obj_type in enumerate(obj_types):
        ROOT_DIR = (Path(__file__).resolve().parent / "../../").resolve()
        print("ROOT_DIR", ROOT_DIR)
        path = ROOT_DIR / "data" / "kitti" / "detection3d"
        bm_dir_save_path = path / "bm_{}maxdist_{}num_{}/".format(
            ex_coords_ratio_lst[i], max_num_bm_lst[i], obj_type
        )
        os.makedirs(bm_dir_save_path, exist_ok=True)
        all_db_infos_lst, box_dims_lst, pnts_lst, mirrored_pnts_lst = extract_allpnts(
            root_path=path,
            splits=["train", "val"],
            type=obj_type,
            apply_mirror=apply_mirror_lst[i],
        )
        box_tensor = torch.as_tensor(box_dims_lst, device="cuda", dtype=torch.float32)
        # Gets the IoU betweeen each pair of boxes, where the boxes only have size and all of their center & heading are the same
        iou3d = get_iou(box_tensor, box_dims_lst)
        # list of range (minx, miny, minz), and (maxx, maxy, maxz) of mirrored_pnts in one object
        range_mirrored = np.array(
            [
                np.concatenate(
                    [
                        np.min(mirrored_pnts_lst[i], axis=0),
                        np.max(mirrored_pnts_lst[i], axis=0),
                    ],
                    axis=-1,
                )
                for i in range(len(mirrored_pnts_lst))
                if mirrored_pnts_lst[i].shape[0] > 0
            ]
        )
        # The total range considering all mirrored objects
        allrange = np.concatenate(
            [
                np.min(range_mirrored[..., :3], axis=0),  # (minx, miny, minz)
                np.max(range_mirrored[..., 3:], axis=0),  # (maxx, maxy, maxz)
            ],
            axis=-1,
        )
        # nx: number of voxels in axis x; ny: number of voxels in axis y
        # BUG FIX: np.int was removed in NumPy >= 1.24; the builtin int is the
        # documented replacement.
        nx, ny = np.ceil((allrange[3] - allrange[0]) / voxel_size[0]).astype(
            int
        ), np.ceil((allrange[4] - allrange[1]) / voxel_size[1]).astype(np.int32)
        # A stack of occupation map for each mirrored points
        occ_map = torch.stack(
            [
                torch.as_tensor(
                    space_occ_voxelpnts(
                        mirrored_pnts_lst[i], allrange, nx, ny, voxel_size=voxel_size
                    ),
                    device="cuda",
                    dtype=torch.int32,
                )
                for i in range(len(mirrored_pnts_lst))
            ],
            dim=0,
        )  # M nx ny
        coords_num = torch.sum(occ_map.view(-1, nx * ny), dim=1)
        print(
            "coords_num", coords_num.shape, torch.min(coords_num), torch.max(coords_num)
        )
        # get coordinate indexes where an object's total # of occupied voxel > PNT_THRESH_lst[i]
        coord_inds = torch.nonzero(coords_num > PNT_THRESH_lst[i])[..., 0]
        print("coord_inds", coord_inds.shape)
        iou3d = iou3d[:, coord_inds]
        # get top K, where K is at most 800, iou3d and their indices for each object
        sorted_iou, best_iou_indices = torch.topk(
            iou3d, min(800, len(iou3d)), dim=-1, sorted=True, largest=True
        )
        pnt_thresh_best_iou_indices = coord_inds[best_iou_indices]
        print(
            "best_iou_indices",
            best_iou_indices.shape,
            "pnt_thresh_best_iou_indices",
            pnt_thresh_best_iou_indices.shape,
            len(mirrored_pnts_lst),
        )
        # exit()
        # print("sorted_iou", torch.min(sorted_iou), best_iou_indices.shape, best_iou_indices[0,:5])
        find_best_match_boxpnts(
            all_db_infos_lst,
            box_dims_lst,
            sorted_iou,
            pnt_thresh_best_iou_indices,
            mirrored_pnts_lst,
            pnts_lst,
            coords_num,
            occ_map,
            bm_dir_save_path,
            allrange,
            nx,
            ny,
            voxel_size,
            max_num_bm=max_num_bm_lst[i],
            num_extra_coords=num_extra_coords_lst[i],
            iou_thresh=iou_thresh_lst[i],
            ex_coords_ratio=ex_coords_ratio_lst[i],
            nearest_dist=nearest_dist_lst[i],
            vis=vis,
            save=save,
        )
| [
"pickle.dump",
"numpy.maximum",
"torch.sqrt",
"torch.cat",
"numpy.ones",
"numpy.argsort",
"pathlib.Path",
"numpy.sin",
"numpy.arange",
"pickle.load",
"os.path.join",
"sklearn.cluster.DBSCAN",
"sys.path.append",
"numpy.zeros_like",
"numpy.meshgrid",
"sklearn.cluster.KMeans",
"mayavi.m... | [((560, 629), 'sys.path.append', 'sys.path.append', (['"""/home/xharlie/dev/occlusion_pcd/tools/visual_utils"""'], {}), "('/home/xharlie/dev/occlusion_pcd/tools/visual_utils')\n", (575, 629), False, 'import sys\n'), ((6286, 6375), 'visualize_utils.draw_scenes_multi', 'vu.draw_scenes_multi', (['box_pnt_lst', 'colors_lst', 'size_lst', 'mode_lst'], {'bgcolor': '(1, 1, 1)'}), '(box_pnt_lst, colors_lst, size_lst, mode_lst, bgcolor=(\n 1, 1, 1))\n', (6306, 6375), True, 'import visualize_utils as vu\n'), ((6382, 6393), 'mayavi.mlab.axes', 'mlab.axes', ([], {}), '()\n', (6391, 6393), True, 'import mayavi.mlab as mlab\n'), ((6398, 6409), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (6407, 6409), True, 'import mayavi.mlab as mlab\n'), ((6642, 6655), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (6652, 6655), True, 'import numpy as np\n'), ((6699, 6727), 'numpy.searchsorted', 'np.searchsorted', (['sorted_x', 'y'], {}), '(sorted_x, y)\n', (6714, 6727), True, 'import numpy as np\n'), ((6741, 6782), 'numpy.take', 'np.take', (['index', 'sorted_index'], {'mode': '"""clip"""'}), "(index, sorted_index, mode='clip')\n", (6748, 6782), True, 'import numpy as np\n'), ((7317, 7405), 'numpy.concatenate', 'np.concatenate', (['[pnts[..., 0:1], -pnts[..., 1:2], pnts[..., 2:lastchannel]]'], {'axis': '(-1)'}), '([pnts[..., 0:1], -pnts[..., 1:2], pnts[..., 2:lastchannel]],\n axis=-1)\n', (7331, 7405), True, 'import numpy as np\n'), ((7500, 7543), 'numpy.concatenate', 'np.concatenate', (['[pnts, mirror_pnts]'], {'axis': '(0)'}), '([pnts, mirror_pnts], axis=0)\n', (7514, 7543), True, 'import numpy as np\n'), ((7739, 7769), 'numpy.zeros_like', 'np.zeros_like', (['temp_box[:, -1]'], {}), '(temp_box[:, -1])\n', (7752, 7769), True, 'import numpy as np\n'), ((7795, 7828), 'numpy.zeros_like', 'np.zeros_like', (['gt_box_arry[:, -1]'], {}), '(gt_box_arry[:, -1])\n', (7808, 7828), True, 'import numpy as np\n'), ((8043, 8075), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], 
{'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (8054, 8075), True, 'import numpy as np\n'), ((9169, 9207), 'numpy.concatenate', 'np.concatenate', (['moved_temp_lst'], {'axis': '(0)'}), '(moved_temp_lst, axis=0)\n', (9183, 9207), True, 'import numpy as np\n'), ((9225, 9263), 'numpy.concatenate', 'np.concatenate', (['moved_pnts_lst'], {'axis': '(0)'}), '(moved_pnts_lst, axis=0)\n', (9239, 9263), True, 'import numpy as np\n'), ((10000, 10092), 'visualize_utils.draw_scenes_multi', 'vu.draw_scenes_multi', (['render_pnts_lst', 'colors_lst', 'size_lst', 'mode_lst'], {'bgcolor': '(1, 1, 1)'}), '(render_pnts_lst, colors_lst, size_lst, mode_lst,\n bgcolor=(1, 1, 1))\n', (10020, 10092), True, 'import visualize_utils as vu\n'), ((10487, 10498), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (10496, 10498), True, 'import mayavi.mlab as mlab\n'), ((11674, 11811), 'visualize_utils.draw_scenes_multi', 'vu.draw_scenes_multi', (['[pnts_lst[0]]', '[colors_lst[0]]', '[size_lst[0]]', '[mode_lst[0]]'], {'bgcolor': '(1, 1, 1)', 'gt_boxes': 'None', 'ref_boxes': 'None'}), '([pnts_lst[0]], [colors_lst[0]], [size_lst[0]], [\n mode_lst[0]], bgcolor=(1, 1, 1), gt_boxes=None, ref_boxes=None)\n', (11694, 11811), True, 'import visualize_utils as vu\n'), ((11875, 12012), 'visualize_utils.draw_scenes_multi', 'vu.draw_scenes_multi', (['[pnts_lst[1]]', '[colors_lst[1]]', '[size_lst[1]]', '[mode_lst[1]]'], {'bgcolor': '(1, 1, 1)', 'gt_boxes': 'None', 'ref_boxes': 'None'}), '([pnts_lst[1]], [colors_lst[1]], [size_lst[1]], [\n mode_lst[1]], bgcolor=(1, 1, 1), gt_boxes=None, ref_boxes=None)\n', (11895, 12012), True, 'import visualize_utils as vu\n'), ((12075, 12086), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (12084, 12086), True, 'import mayavi.mlab as mlab\n'), ((12303, 12320), 'torch.sqrt', 'torch.sqrt', (['dist1'], {}), '(dist1)\n', (12313, 12320), False, 'import torch\n'), ((12736, 12749), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (12742, 12749), True, 'import numpy as 
np\n'), ((12761, 12774), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (12767, 12774), True, 'import numpy as np\n'), ((12787, 12807), 'numpy.zeros_like', 'np.zeros_like', (['angle'], {}), '(angle)\n', (12800, 12807), True, 'import numpy as np\n'), ((12819, 12838), 'numpy.ones_like', 'np.ones_like', (['angle'], {}), '(angle)\n', (12831, 12838), True, 'import numpy as np\n'), ((12979, 13016), 'numpy.matmul', 'np.matmul', (['points[:, 0:3]', 'rot_matrix'], {}), '(points[:, 0:3], rot_matrix)\n', (12988, 13016), True, 'import numpy as np\n'), ((13571, 13598), 'torch.cat', 'torch.cat', (['iou3d_lst'], {'dim': '(0)'}), '(iou3d_lst, dim=0)\n', (13580, 13598), False, 'import torch\n'), ((15706, 15728), 'numpy.array', 'np.array', (['num_pnts_lst'], {}), '(num_pnts_lst)\n', (15714, 15728), True, 'import numpy as np\n'), ((21120, 21173), 'numpy.logical_and', 'np.logical_and', (['(pnts <= dim * 0.5)', '(pnts >= -dim * 0.5)'], {}), '(pnts <= dim * 0.5, pnts >= -dim * 0.5)\n', (21134, 21173), True, 'import numpy as np\n'), ((28045, 28073), 'numpy.array', 'np.array', (['[0.08, 0.08, 0.08]'], {}), '([0.08, 0.08, 0.08])\n', (28053, 28073), True, 'import numpy as np\n'), ((30643, 30677), 'numpy.zeros', 'np.zeros', (['[nx, ny]'], {'dtype': 'np.int32'}), '([nx, ny], dtype=np.int32)\n', (30651, 30677), True, 'import numpy as np\n'), ((6552, 6576), 'pickle.dump', 'pickle.dump', (['template', 'f'], {}), '(template, f)\n', (6563, 6576), False, 'import pickle\n'), ((8197, 8246), 'numpy.array', 'np.array', (['[[xv[ind], yv[ind], 0]]'], {'dtype': 'np.float'}), '([[xv[ind], yv[ind], 0]], dtype=np.float)\n', (8205, 8246), True, 'import numpy as np\n'), ((10631, 10661), 'numpy.zeros_like', 'np.zeros_like', (['temp_box[:, :3]'], {}), '(temp_box[:, :3])\n', (10644, 10661), True, 'import numpy as np\n'), ((10688, 10718), 'numpy.zeros_like', 'np.zeros_like', (['pnts_box[:, -1]'], {}), '(pnts_box[:, -1])\n', (10701, 10718), True, 'import numpy as np\n'), ((10745, 10775), 'numpy.zeros_like', 
'np.zeros_like', (['pnts_box[:, :3]'], {}), '(pnts_box[:, :3])\n', (10758, 10775), True, 'import numpy as np\n'), ((10802, 10832), 'numpy.zeros_like', 'np.zeros_like', (['pnts_box[:, -1]'], {}), '(pnts_box[:, -1])\n', (10815, 10832), True, 'import numpy as np\n'), ((15012, 15048), 'torch.stack', 'torch.stack', (['pnts_padding_lst'], {'dim': '(0)'}), '(pnts_padding_lst, dim=0)\n', (15023, 15048), False, 'import torch\n'), ((15153, 15171), 'numpy.array', 'np.array', (['mask_lst'], {}), '(mask_lst)\n', (15161, 15171), True, 'import numpy as np\n'), ((15207, 15232), 'numpy.array', 'np.array', (['reversemask_lst'], {}), '(reversemask_lst)\n', (15215, 15232), True, 'import numpy as np\n'), ((21549, 21578), 'torch.sum', 'torch.sum', (['masked_dist'], {'dim': '(1)'}), '(masked_dist, dim=1)\n', (21558, 21578), False, 'import torch\n'), ((21648, 21677), 'torch.min', 'torch.min', (['addmin_dist'], {'dim': '(1)'}), '(addmin_dist, dim=1)\n', (21657, 21677), False, 'import torch\n'), ((21700, 21729), 'torch.max', 'torch.max', (['addmax_dist'], {'dim': '(1)'}), '(addmax_dist, dim=1)\n', (21709, 21729), False, 'import torch\n'), ((22880, 22932), 'torch.unsqueeze', 'torch.unsqueeze', (['mirr_box_reversemask_tensor'], {'dim': '(-1)'}), '(mirr_box_reversemask_tensor, dim=-1)\n', (22895, 22932), False, 'import torch\n'), ((23539, 23556), 'torch.sqrt', 'torch.sqrt', (['dist1'], {}), '(dist1)\n', (23549, 23556), False, 'import torch\n'), ((24035, 24110), 'torch.zeros', 'torch.zeros', (['[gt_boxnum, candidate_num]'], {'device': '"""cuda"""', 'dtype': 'torch.float32'}), "([gt_boxnum, candidate_num], device='cuda', dtype=torch.float32)\n", (24046, 24110), False, 'import torch\n'), ((24755, 24782), 'torch.min', 'torch.min', (['heuristic'], {'dim': '(1)'}), '(heuristic, dim=1)\n', (24764, 24782), False, 'import torch\n'), ((30731, 30751), 'numpy.array', 'np.array', (['voxel_size'], {}), '(voxel_size)\n', (30739, 30751), True, 'import numpy as np\n'), ((31062, 31110), 'numpy.ones_like', 
'np.ones_like', (['gtpnts_ind[..., 0]'], {'dtype': 'np.int32'}), '(gtpnts_ind[..., 0], dtype=np.int32)\n', (31074, 31110), True, 'import numpy as np\n'), ((32643, 32687), 'os.makedirs', 'os.makedirs', (['bm_dir_save_path'], {'exist_ok': '(True)'}), '(bm_dir_save_path, exist_ok=True)\n', (32654, 32687), False, 'import os\n'), ((32944, 33009), 'torch.as_tensor', 'torch.as_tensor', (['box_dims_lst'], {'device': '"""cuda"""', 'dtype': 'torch.float32'}), "(box_dims_lst, device='cuda', dtype=torch.float32)\n", (32959, 33009), False, 'import torch\n'), ((1713, 1728), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (1717, 1728), False, 'from pathlib import Path\n'), ((3400, 3444), 'numpy.zeros_like', 'np.zeros_like', (['clusterer.labels_'], {'dtype': 'bool'}), '(clusterer.labels_, dtype=bool)\n', (3413, 3444), True, 'import numpy as np\n'), ((7949, 7966), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (7958, 7966), True, 'import numpy as np\n'), ((7995, 8011), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (8004, 8011), True, 'import numpy as np\n'), ((12856, 12933), 'numpy.stack', 'np.stack', (['(cosa, sina, zeros, -sina, cosa, zeros, zeros, zeros, ones)'], {'axis': '(0)'}), '((cosa, sina, zeros, -sina, cosa, zeros, zeros, zeros, ones), axis=0)\n', (12864, 12933), True, 'import numpy as np\n'), ((15098, 15124), 'numpy.array', 'np.array', (['pnts_padding_lst'], {}), '(pnts_padding_lst)\n', (15106, 15124), True, 'import numpy as np\n'), ((25875, 25920), 'numpy.concatenate', 'np.concatenate', (['[bm_pnts, added_pnts]'], {'axis': '(0)'}), '([bm_pnts, added_pnts], axis=0)\n', (25889, 25920), True, 'import numpy as np\n'), ((28758, 28780), 'numpy.min', 'np.min', (['gtpnts'], {'axis': '(0)'}), '(gtpnts, axis=0)\n', (28764, 28780), True, 'import numpy as np\n'), ((28794, 28816), 'numpy.max', 'np.max', (['gtpnts'], {'axis': '(0)'}), '(gtpnts, axis=0)\n', (28800, 28816), True, 'import numpy as np\n'), ((28830, 28853), 'numpy.min', 'np.min', 
(['augpnts'], {'axis': '(0)'}), '(augpnts, axis=0)\n', (28836, 28853), True, 'import numpy as np\n'), ((28867, 28890), 'numpy.max', 'np.max', (['augpnts'], {'axis': '(0)'}), '(augpnts, axis=0)\n', (28873, 28890), True, 'import numpy as np\n'), ((29767, 29798), 'numpy.expand_dims', 'np.expand_dims', (['augpnts'], {'axis': '(0)'}), '(augpnts, axis=0)\n', (29781, 29798), True, 'import numpy as np\n'), ((29800, 29830), 'numpy.expand_dims', 'np.expand_dims', (['gtpnts'], {'axis': '(0)'}), '(gtpnts, axis=0)\n', (29814, 29830), True, 'import numpy as np\n'), ((34895, 34916), 'torch.min', 'torch.min', (['coords_num'], {}), '(coords_num)\n', (34904, 34916), False, 'import torch\n'), ((34918, 34939), 'torch.max', 'torch.max', (['coords_num'], {}), '(coords_num)\n', (34927, 34939), False, 'import torch\n'), ((35068, 35113), 'torch.nonzero', 'torch.nonzero', (['(coords_num > PNT_THRESH_lst[i])'], {}), '(coords_num > PNT_THRESH_lst[i])\n', (35081, 35113), False, 'import torch\n'), ((1840, 1854), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1851, 1854), False, 'import pickle\n'), ((2573, 2588), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (2577, 2588), False, 'from pathlib import Path\n'), ((3206, 3252), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_cluster', 'random_state': '(1)'}), '(n_clusters=num_cluster, random_state=1)\n', (3212, 3252), False, 'from sklearn.cluster import KMeans, DBSCAN\n'), ((14441, 14484), 'numpy.ones', 'np.ones', (['[num_pnts_arry[i]]'], {'dtype': 'np.float'}), '([num_pnts_arry[i]], dtype=np.float)\n', (14448, 14484), True, 'import numpy as np\n'), ((14502, 14561), 'numpy.zeros', 'np.zeros', (['[max_num_pnts - num_pnts_arry[i]]'], {'dtype': 'np.float'}), '([max_num_pnts - num_pnts_arry[i]], dtype=np.float)\n', (14510, 14561), True, 'import numpy as np\n'), ((14655, 14699), 'numpy.zeros', 'np.zeros', (['[num_pnts_arry[i]]'], {'dtype': 'np.float'}), '([num_pnts_arry[i]], dtype=np.float)\n', (14663, 14699), True, 
'import numpy as np\n'), ((25594, 25624), 'numpy.expand_dims', 'np.expand_dims', (['gt_box'], {'axis': '(0)'}), '(gt_box, axis=0)\n', (25608, 25624), True, 'import numpy as np\n'), ((25771, 25801), 'numpy.expand_dims', 'np.expand_dims', (['gt_box'], {'axis': '(0)'}), '(gt_box, axis=0)\n', (25785, 25801), True, 'import numpy as np\n'), ((28947, 28982), 'numpy.minimum', 'np.minimum', (['min_gtpnts', 'min_augpnts'], {}), '(min_gtpnts, min_augpnts)\n', (28957, 28982), True, 'import numpy as np\n'), ((28984, 29019), 'numpy.maximum', 'np.maximum', (['max_gtpnts', 'max_augpnts'], {}), '(max_gtpnts, max_augpnts)\n', (28994, 29019), True, 'import numpy as np\n'), ((29154, 29188), 'numpy.expand_dims', 'np.expand_dims', (['voxel_size'], {'axis': '(0)'}), '(voxel_size, axis=0)\n', (29168, 29188), True, 'import numpy as np\n'), ((29303, 29337), 'numpy.expand_dims', 'np.expand_dims', (['voxel_size'], {'axis': '(0)'}), '(voxel_size, axis=0)\n', (29317, 29337), True, 'import numpy as np\n'), ((33841, 33880), 'numpy.min', 'np.min', (['range_mirrored[..., :3]'], {'axis': '(0)'}), '(range_mirrored[..., :3], axis=0)\n', (33847, 33880), True, 'import numpy as np\n'), ((33920, 33959), 'numpy.max', 'np.max', (['range_mirrored[..., 3:]'], {'axis': '(0)'}), '(range_mirrored[..., 3:], axis=0)\n', (33926, 33959), True, 'import numpy as np\n'), ((3320, 3351), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.3)', 'min_samples': '(10)'}), '(eps=0.3, min_samples=10)\n', (3326, 3351), False, 'from sklearn.cluster import KMeans, DBSCAN\n'), ((5999, 6036), 'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['colors[i]', '"""RGB"""'], {}), "(colors[i], 'RGB')\n", (6018, 6036), False, 'from PIL import ImageColor\n'), ((11386, 11423), 'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['colors[i]', '"""RGB"""'], {}), "(colors[i], 'RGB')\n", (11405, 11423), False, 'from PIL import ImageColor\n'), ((13987, 14075), 'torch.zeros', 'torch.zeros', (['[max_num_pnts - num_pnts_arry[i], 3]'], {'dtype': 
'torch.float', 'device': '"""cuda"""'}), "([max_num_pnts - num_pnts_arry[i], 3], dtype=torch.float, device\n ='cuda')\n", (13998, 14075), False, 'import torch\n'), ((14310, 14356), 'numpy.zeros', 'np.zeros', (['[max_num_pnts - num_pnts_arry[i], 3]'], {}), '([max_num_pnts - num_pnts_arry[i], 3])\n', (14318, 14356), True, 'import numpy as np\n'), ((14724, 14782), 'numpy.ones', 'np.ones', (['[max_num_pnts - num_pnts_arry[i]]'], {'dtype': 'np.float'}), '([max_num_pnts - num_pnts_arry[i]], dtype=np.float)\n', (14731, 14782), True, 'import numpy as np\n'), ((15396, 15420), 'torch.from_numpy', 'torch.from_numpy', (['sample'], {}), '(sample)\n', (15412, 15420), False, 'import torch\n'), ((20635, 20690), 'os.path.join', 'os.path.join', (['bm_dir', "(image_idx + '_' + gt_idx + '.pkl')"], {}), "(bm_dir, image_idx + '_' + gt_idx + '.pkl')\n", (20647, 20690), False, 'import os\n'), ((25999, 26017), 'torch.sum', 'torch.sum', (['aug_map'], {}), '(aug_map)\n', (26008, 26017), False, 'import torch\n'), ((27337, 27395), 'torch.cat', 'torch.cat', (['[sorted_iou[:ind], sorted_iou[ind + 1:]]'], {'dim': '(0)'}), '([sorted_iou[:ind], sorted_iou[ind + 1:]], dim=0)\n', (27346, 27395), False, 'import torch\n'), ((27428, 27498), 'torch.cat', 'torch.cat', (['[selected_indices[:ind], selected_indices[ind + 1:]]'], {'dim': '(0)'}), '([selected_indices[:ind], selected_indices[ind + 1:]], dim=0)\n', (27437, 27498), False, 'import torch\n'), ((27561, 27631), 'torch.cat', 'torch.cat', (['[selected_occ_map[:ind], selected_occ_map[ind + 1:]]'], {'dim': '(0)'}), '([selected_occ_map[:ind], selected_occ_map[ind + 1:]], dim=0)\n', (27570, 27631), False, 'import torch\n'), ((27690, 27758), 'torch.cat', 'torch.cat', (['[max_instance[:, :ind], max_instance[:, ind + 1:]]'], {'dim': '(1)'}), '([max_instance[:, :ind], max_instance[:, ind + 1:]], dim=1)\n', (27699, 27758), False, 'import torch\n'), ((27818, 27888), 'torch.cat', 'torch.cat', (['[mean_instance[:, :ind], mean_instance[:, ind + 1:]]'], {'dim': 
'(1)'}), '([mean_instance[:, :ind], mean_instance[:, ind + 1:]], dim=1)\n', (27827, 27888), False, 'import torch\n'), ((29105, 29138), 'numpy.expand_dims', 'np.expand_dims', (['range[:3]'], {'axis': '(0)'}), '(range[:3], axis=0)\n', (29119, 29138), True, 'import numpy as np\n'), ((29254, 29287), 'numpy.expand_dims', 'np.expand_dims', (['range[:3]'], {'axis': '(0)'}), '(range[:3], axis=0)\n', (29268, 29287), True, 'import numpy as np\n'), ((29365, 29411), 'numpy.ceil', 'np.ceil', (['((range[3] - range[0]) / voxel_size[0])'], {}), '((range[3] - range[0]) / voxel_size[0])\n', (29372, 29411), True, 'import numpy as np\n'), ((29428, 29474), 'numpy.ceil', 'np.ceil', (['((range[4] - range[1]) / voxel_size[1])'], {}), '((range[4] - range[1]) / voxel_size[1])\n', (29435, 29474), True, 'import numpy as np\n'), ((34119, 34171), 'numpy.ceil', 'np.ceil', (['((allrange[3] - allrange[0]) / voxel_size[0])'], {}), '((allrange[3] - allrange[0]) / voxel_size[0])\n', (34126, 34171), True, 'import numpy as np\n'), ((34210, 34262), 'numpy.ceil', 'np.ceil', (['((allrange[4] - allrange[1]) / voxel_size[1])'], {}), '((allrange[4] - allrange[1]) / voxel_size[1])\n', (34217, 34262), True, 'import numpy as np\n'), ((2245, 2271), 'numpy.zeros_like', 'np.zeros_like', (['gt_box[0:3]'], {}), '(gt_box[0:3])\n', (2258, 2271), True, 'import numpy as np\n'), ((2297, 2318), 'numpy.array', 'np.array', (['gt_box[3:6]'], {}), '(gt_box[3:6])\n', (2305, 2318), True, 'import numpy as np\n'), ((2344, 2370), 'numpy.zeros_like', 'np.zeros_like', (['gt_box[6:7]'], {}), '(gt_box[6:7])\n', (2357, 2370), True, 'import numpy as np\n'), ((9592, 9629), 'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['colors[0]', '"""RGB"""'], {}), "(colors[0], 'RGB')\n", (9611, 9629), False, 'from PIL import ImageColor\n'), ((9709, 9746), 'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['colors[1]', '"""RGB"""'], {}), "(colors[1], 'RGB')\n", (9728, 9746), False, 'from PIL import ImageColor\n'), ((16313, 16352), 
'torch.unsqueeze', 'torch.unsqueeze', (['boxpoint_tensor'], {'dim': '(1)'}), '(boxpoint_tensor, dim=1)\n', (16328, 16352), False, 'import torch\n'), ((30948, 30982), 'numpy.expand_dims', 'np.expand_dims', (['voxel_size'], {'axis': '(0)'}), '(voxel_size, axis=0)\n', (30962, 30982), True, 'import numpy as np\n'), ((33415, 33451), 'numpy.min', 'np.min', (['mirrored_pnts_lst[i]'], {'axis': '(0)'}), '(mirrored_pnts_lst[i], axis=0)\n', (33421, 33451), True, 'import numpy as np\n'), ((33477, 33513), 'numpy.max', 'np.max', (['mirrored_pnts_lst[i]'], {'axis': '(0)'}), '(mirrored_pnts_lst[i], axis=0)\n', (33483, 33513), True, 'import numpy as np\n'), ((16627, 16666), 'torch.unsqueeze', 'torch.unsqueeze', (['boxpoint_tensor'], {'dim': '(1)'}), '(boxpoint_tensor, dim=1)\n', (16642, 16666), False, 'import torch\n'), ((16887, 16926), 'torch.unsqueeze', 'torch.unsqueeze', (['boxpoint_tensor'], {'dim': '(1)'}), '(boxpoint_tensor, dim=1)\n', (16902, 16926), False, 'import torch\n'), ((30896, 30932), 'numpy.expand_dims', 'np.expand_dims', (['allrange[:3]'], {'axis': '(0)'}), '(allrange[:3], axis=0)\n', (30910, 30932), True, 'import numpy as np\n'), ((32345, 32359), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (32349, 32359), False, 'from pathlib import Path\n')] |
import logging
import pickle
import random
import re
import time
from logging import Logger
from typing import List, Optional, Type, Union
import matplotlib.pyplot as plt
import numpy as np
from mohou.model.autoencoder import VariationalAutoEncoder
try:
from moviepy.editor import ImageSequenceClip
except Exception:
ImageSequenceClip = None
from mohou.dataset import (
AutoEncoderDataset,
AutoEncoderDatasetConfig,
AutoRegressiveDataset,
AutoRegressiveDatasetConfig,
WeightPolicy,
)
from mohou.default import load_default_image_encoder
from mohou.encoding_rule import EncodingRule
from mohou.file import get_project_path, get_subproject_path
from mohou.model import (
LSTM,
AutoEncoder,
AutoEncoderBase,
AutoEncoderConfig,
LSTMConfig,
)
from mohou.propagator import Propagator
from mohou.trainer import TrainCache, TrainConfig, train
from mohou.types import (
AngleVector,
ElementDict,
GripperState,
ImageBase,
MultiEpisodeChunk,
TerminateFlag,
)
from mohou.utils import canvas_to_ndarray
logger = logging.getLogger(__name__)
def create_default_logger(project_name: str, prefix: str) -> Logger:
    """Configure file logging for *project_name* and return the "mohou" logger.

    Creates ``<project>/log/<prefix>_<timestamp>.log``, points the root
    logging config at it, and refreshes a ``latest_<prefix>.log`` symlink so
    the newest log file is always reachable under a stable name.

    Args:
        project_name: Name of the mohou project (resolved via ``get_project_path``).
        prefix: Prefix for the log file name (e.g. "train").

    Returns:
        The "mohou" logger, set to INFO level.
    """
    timestr = "_" + time.strftime("%Y%m%d%H%M%S")
    log_dir_path = get_project_path(project_name) / "log"
    log_dir_path.mkdir(parents=True, exist_ok=True)
    log_file_path = log_dir_path / (prefix + timestr + ".log")

    FORMAT = "[%(levelname)s] %(asctime)s %(name)s: %(message)s"
    logging.basicConfig(filename=str(log_file_path), format=FORMAT)
    logger = logging.getLogger("mohou")
    logger.setLevel(level=logging.INFO)

    log_sym_path = log_dir_path / ("latest_" + prefix + ".log")
    logger.info("create log symlink :{0} => {1}".format(log_file_path, log_sym_path))
    # Remove any pre-existing entry, symlink OR regular file: checking only
    # is_symlink() (as before) left a stale regular file in place, making
    # symlink_to() raise FileExistsError.
    if log_sym_path.is_symlink() or log_sym_path.exists():
        log_sym_path.unlink()
    log_sym_path.symlink_to(log_file_path)
    return logger
def train_autoencoder(
    project_name: str,
    image_type: Type[ImageBase],
    model_config: AutoEncoderConfig,
    dataset_config: AutoEncoderDatasetConfig,
    train_config: TrainConfig,
    ae_type: Type[AutoEncoderBase] = AutoEncoder,
    chunk: Optional[MultiEpisodeChunk] = None,
    warm_start: bool = False,
):
    """Train an autoencoder of *ae_type* on the project's episode chunk.

    If *chunk* is not given it is loaded from the project. With
    ``warm_start=True`` training resumes from the cached model; otherwise a
    fresh model is built from *model_config*.
    """
    if chunk is None:
        chunk = MultiEpisodeChunk.load(project_name)
    dataset = AutoEncoderDataset.from_chunk(chunk, image_type, dataset_config)

    if warm_start:
        # resume: reuse the cached train state; train() keeps its stored model
        logger.info("warm start")
        tcache = TrainCache.load(project_name, ae_type)
        model = None
    else:
        tcache = TrainCache(project_name)  # type: ignore[var-annotated]
        model = ae_type(model_config)  # type: ignore
    train(tcache, dataset, model=model, config=train_config)
def train_lstm(
    project_name: str,
    encoding_rule: EncodingRule,
    model_config: LSTMConfig,
    dataset_config: AutoRegressiveDatasetConfig,
    train_config: TrainConfig,
    weighting: Optional[Union[WeightPolicy, List[np.ndarray]]] = None,
    chunk: Optional[MultiEpisodeChunk] = None,
    warm_start: bool = False,
    context_list: Optional[List[np.ndarray]] = None,
):
    """Train an LSTM on auto-regressive sequences built from the project chunk.

    *context_list* supplies one static context vector per episode; its
    dimensionality must match ``model_config.n_static_context`` (0 when no
    context is used). With ``warm_start=True`` training resumes from the
    cached model.
    """
    if chunk is None:
        chunk = MultiEpisodeChunk.load(project_name)

    # sanity-check the static context configuration before building the dataset
    if context_list is None:
        assert model_config.n_static_context == 0
    else:
        assert all(len(c) == model_config.n_static_context for c in context_list)

    dataset = AutoRegressiveDataset.from_chunk(
        chunk,
        encoding_rule,
        augconfig=dataset_config,
        weighting=weighting,
        static_context_list=context_list,
    )

    if warm_start:
        # resume: reuse the cached train state; train() keeps its stored model
        logger.info("warm start")
        tcache = TrainCache.load(project_name, LSTM)
        model = None
    else:
        tcache = TrainCache(project_name)  # type: ignore[var-annotated]
        model = LSTM(model_config)
    train(tcache, dataset, model=model, config=train_config)
def visualize_train_histories(project_name: str):
    """Plot the loss history of every pickled TrainCache found in the project.

    Scans the project directory for files whose name contains "TrainCache"
    and writes one PNG per cache into the "train_history" sub-directory.
    """
    project_path = get_project_path(project_name)
    file_paths = sorted(project_path.iterdir())
    plot_dir_path = get_subproject_path(project_name, "train_history")
    for path in file_paths:
        m = re.match(r".*TrainCache.*", path.name)
        if m is not None:
            pickle_path = project_path / path.name
            with pickle_path.open(mode="rb") as f:
                tcache: TrainCache = pickle.load(f)
                fig, ax = plt.subplots()
                tcache.visualize((fig, ax))
                image_path = plot_dir_path / (path.name + ".png")
                fig.savefig(str(image_path))
                # close explicitly: otherwise one open figure leaks per cache file
                plt.close(fig)
                print("saved to {}".format(image_path))
def visualize_image_reconstruction(
    project_name: str, n_vis: int = 5, ae_type: Type[AutoEncoderBase] = AutoEncoder
):
    """Save side-by-side original/reconstruction plots for a trained autoencoder.

    Samples up to *n_vis* random images from both the intact and not-intact
    parts of the project chunk, runs them through the best cached model of
    *ae_type*, and writes comparison PNGs into "autoencoder_result".
    """
    chunk = MultiEpisodeChunk.load(project_name)
    chunk_intact = chunk.get_intact_chunk()
    chunk_not_intact = chunk.get_not_intact_chunk()

    tcache = TrainCache.load(project_name, ae_type)
    assert tcache.best_model is not None
    image_type = tcache.best_model.image_type  # type: ignore[union-attr]
    no_aug = AutoEncoderDatasetConfig(0)  # to feed not randomized image
    dataset_intact = AutoEncoderDataset.from_chunk(chunk_intact, image_type, no_aug)
    dataset_not_intact = AutoEncoderDataset.from_chunk(chunk_not_intact, image_type, no_aug)

    for dataset, postfix in zip([dataset_intact, dataset_not_intact], ["intact", "not_intact"]):
        idxes = list(range(len(dataset)))
        random.shuffle(idxes)
        idxes_test = idxes[: min(n_vis, len(dataset))]

        for i, idx in enumerate(idxes_test):

            image_torch = dataset[idx].unsqueeze(dim=0)
            image_torch_reconstructed = tcache.best_model(image_torch)
            img = dataset.image_type.from_tensor(image_torch.squeeze(dim=0))
            img_reconstructed = dataset.image_type.from_tensor(
                image_torch_reconstructed.squeeze(dim=0)
            )

            fig, (ax1, ax2) = plt.subplots(1, 2)
            fig.suptitle("left: original, right: reconstructed")
            ax1.imshow(img.to_rgb()._data)
            ax2.imshow(img_reconstructed.to_rgb()._data)
            save_dir_path = get_subproject_path(project_name, "autoencoder_result")
            file_path = save_dir_path / "result-{}-{}.png".format(postfix, i)
            plt.savefig(str(file_path))
            # close explicitly: figures otherwise accumulate across the loop and leak
            plt.close(fig)
def visualize_variational_autoencoder(project_name: Optional[str] = None):
    """Render one GIF per latent dimension of the project's trained VAE.

    Each GIF sweeps a single bottleneck axis of the decoder, which helps
    inspect what that latent dimension encodes. Files are written to the
    "autoencoder_result" sub-directory as ``vae-axis<i>.gif``.
    """
    tcache = TrainCache.load(project_name, VariationalAutoEncoder)
    vae = tcache.best_model
    assert vae is not None
    out_dir = get_subproject_path(project_name, "autoencoder_result")

    for dim in range(vae.config.n_bottleneck):
        frames = vae.get_latent_axis_images(dim)
        assert ImageSequenceClip is not None, "check if your moviepy is properly installed"
        gif_clip = ImageSequenceClip([frame.numpy() for frame in frames], fps=20)
        gif_path = out_dir / "vae-axis{}.gif".format(dim)
        gif_clip.write_gif(str(gif_path), fps=20)
def add_text_to_image(image: ImageBase, text: str, color: str):
    """Render *image* with *text* overlaid near the top-left corner.

    Args:
        image: Image to annotate (converted to RGB for display).
        text: Caption drawn inside a rounded white box.
        color: Matplotlib color name for the caption text.

    Returns:
        An ndarray of the rendered, annotated figure (via canvas_to_ndarray).
    """
    fig = plt.figure(tight_layout={"pad": 0})
    ax = plt.subplot(1, 1, 1)
    ax.axis("off")
    ax.imshow(image.to_rgb()._data)
    bbox = dict(boxstyle="round", facecolor="white", alpha=0.7)
    ax.text(7, 1, text, fontsize=15, color=color, verticalalignment="top", bbox=bbox)
    fig.canvas.draw()
    fig.canvas.flush_events()
    arr = canvas_to_ndarray(fig)
    # This helper is called once per video frame; close the figure so
    # matplotlib does not keep one open figure alive per frame.
    plt.close(fig)
    return arr
def visualize_lstm_propagation(project_name: str, propagator: Propagator, n_prop: int):
    """Visualize LSTM propagation against each intact episode of the project.

    For every episode: feed the first 20 frames into *propagator*, predict
    *n_prop* further steps, then write (a) a PNG comparing ground-truth vs
    predicted AngleVector (and GripperState, if present) trajectories, and
    (b) a GIF of the fed images followed by the predicted images annotated
    with the termination probability. Outputs go to the "lstm_result"
    sub-directory.
    """
    chunk = MultiEpisodeChunk.load(project_name).get_intact_chunk()
    save_dir_path = get_subproject_path(project_name, "lstm_result")
    image_encoder = load_default_image_encoder(project_name)
    for idx, episode_data in enumerate(chunk):
        # find which concrete image type the propagator's encoding rule uses
        image_type = None
        for key, encoder in propagator.encoding_rule.items():
            if issubclass(key, ImageBase):
                image_type = key
        assert image_type is not None
        n_feed = 20
        feed_avs = episode_data.get_sequence_by_type(AngleVector)[:n_feed]
        feed_images = episode_data.get_sequence_by_type(image_type)[:n_feed]

        # set context if necessary
        if propagator.require_static_context:
            context = image_encoder.forward(feed_images[0])
            propagator.set_static_context(context)

        use_gripper = GripperState in propagator.encoding_rule
        if use_gripper:
            fed_grippers = episode_data.get_sequence_by_type(GripperState)[:n_feed]

        print("start lstm propagation")
        for i in range(n_feed):
            elem_dict = ElementDict([feed_avs[i], feed_images[i]])
            if use_gripper:
                elem_dict[GripperState] = fed_grippers[i]
            propagator.feed(elem_dict)
        print("finish lstm propagation")

        elem_dict_list = propagator.predict(n_prop)
        pred_images = [elem_dict[image_type] for elem_dict in elem_dict_list]
        pred_flags = [elem_dict[TerminateFlag].numpy().item() for elem_dict in elem_dict_list]
        pred_avs = [elem_dict[AngleVector].numpy() for elem_dict in elem_dict_list]
        if use_gripper:
            pred_gss = [elem_dict[GripperState].numpy() for elem_dict in elem_dict_list]

        n_av_dim = chunk.spec.type_shape_table[AngleVector][0]
        n_gs_dim = chunk.spec.type_shape_table[GripperState][0] if use_gripper else 0
        fig, axs = plt.subplots(n_av_dim + n_gs_dim, 1)

        # plot angle vectors: ground truth (blue) vs feed-prefix + prediction (red)
        av_seq_gt = episode_data.get_sequence_by_type(AngleVector)
        np_av_seq_gt = np.array([av.numpy() for av in av_seq_gt])
        np_av_seq_pred = np.concatenate((np_av_seq_gt[:n_feed], np.array(pred_avs)), axis=0)
        i_dim = 0
        for i_av_dim in range(n_av_dim):
            axs[i_dim].plot(np_av_seq_gt[:, i_av_dim], color="blue", lw=1)
            axs[i_dim].plot(np_av_seq_pred[:, i_av_dim], color="red", lw=1)
            # determine axes min max with 10% margin around the combined data range
            conc = np.hstack((np_av_seq_gt[:, i_av_dim], np_av_seq_pred[:, i_av_dim]))
            y_min = np.min(conc)
            y_max = np.max(conc)
            diff = y_max - y_min
            axs[i_dim].set_ylim([y_min - diff * 0.1, y_max + diff * 0.1])
            axs[i_dim].set_title("AngleVector dim {}".format(i_av_dim), fontsize=5, pad=0.0)
            i_dim += 1

        if use_gripper:
            # same plot layout for gripper state, appended below the AV axes
            gs_seq_gt = episode_data.get_sequence_by_type(GripperState)
            np_gs_seq_gt = np.array([gs.numpy() for gs in gs_seq_gt])
            np_gs_seq_pred = np.concatenate((np_gs_seq_gt[:n_feed], np.array(pred_gss)), axis=0)
            for i_gs_dim in range(n_gs_dim):
                axs[i_dim].plot(np_gs_seq_gt[:, i_gs_dim], color="blue", lw=1)
                axs[i_dim].plot(np_gs_seq_pred[:, i_gs_dim], color="red", lw=1)
                # determine axes min max with 10% margin around the combined data range
                conc = np.hstack((np_gs_seq_gt[:, i_gs_dim], np_gs_seq_pred[:, i_gs_dim]))
                y_min = np.min(conc)
                y_max = np.max(conc)
                diff = y_max - y_min
                axs[i_dim].set_ylim([y_min - diff * 0.1, y_max + diff * 0.1])
                axs[i_dim].set_title("GripperState dim {}".format(i_gs_dim), fontsize=5, pad=0.0)
                i_dim += 1

        for ax in axs:
            ax.grid()

        image_path = save_dir_path / "seq-{}{}.png".format(AngleVector.__name__, idx)
        fig.savefig(str(image_path), format="png", dpi=300)
        # close explicitly: one figure per episode would otherwise leak
        plt.close(fig)
        print("saved to {}".format(image_path))

        # save gif image
        print("adding text to images...")
        # note: caption typo fixed ("image)" -> "image")
        fed_images_with_text = [
            add_text_to_image(image, "fed (original) image", "blue") for image in feed_images
        ]
        clamp = lambda x: max(min(x, 1.0), 0.0)  # noqa
        pred_images_with_text = [
            add_text_to_image(
                image, "predicted image (prob-terminated={:.2f})".format(clamp(flag)), "green"
            )
            for image, flag in zip(pred_images, pred_flags)
        ]
        images_with_text = fed_images_with_text + pred_images_with_text
        image_path = save_dir_path / "result-image{}.gif".format(idx)
        assert ImageSequenceClip is not None, "check if your moviepy is properly installed"
        clip = ImageSequenceClip(images_with_text, fps=20)
        clip.write_gif(str(image_path), fps=20)
| [
"mohou.dataset.AutoEncoderDataset.from_chunk",
"mohou.utils.canvas_to_ndarray",
"random.shuffle",
"time.strftime",
"logging.getLogger",
"matplotlib.pyplot.figure",
"mohou.trainer.TrainCache.load",
"mohou.trainer.TrainCache",
"pickle.load",
"moviepy.editor.ImageSequenceClip",
"mohou.file.get_subp... | [((1076, 1103), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1093, 1103), False, 'import logging\n'), ((1545, 1571), 'logging.getLogger', 'logging.getLogger', (['"""mohou"""'], {}), "('mohou')\n", (1562, 1571), False, 'import logging\n'), ((2305, 2369), 'mohou.dataset.AutoEncoderDataset.from_chunk', 'AutoEncoderDataset.from_chunk', (['chunk', 'image_type', 'dataset_config'], {}), '(chunk, image_type, dataset_config)\n', (2334, 2369), False, 'from mohou.dataset import AutoEncoderDataset, AutoEncoderDatasetConfig, AutoRegressiveDataset, AutoRegressiveDatasetConfig, WeightPolicy\n'), ((3416, 3556), 'mohou.dataset.AutoRegressiveDataset.from_chunk', 'AutoRegressiveDataset.from_chunk', (['chunk', 'encoding_rule'], {'augconfig': 'dataset_config', 'weighting': 'weighting', 'static_context_list': 'context_list'}), '(chunk, encoding_rule, augconfig=\n dataset_config, weighting=weighting, static_context_list=context_list)\n', (3448, 3556), False, 'from mohou.dataset import AutoEncoderDataset, AutoEncoderDatasetConfig, AutoRegressiveDataset, AutoRegressiveDatasetConfig, WeightPolicy\n'), ((4024, 4054), 'mohou.file.get_project_path', 'get_project_path', (['project_name'], {}), '(project_name)\n', (4040, 4054), False, 'from mohou.file import get_project_path, get_subproject_path\n'), ((4124, 4174), 'mohou.file.get_subproject_path', 'get_subproject_path', (['project_name', '"""train_history"""'], {}), "(project_name, 'train_history')\n", (4143, 4174), False, 'from mohou.file import get_project_path, get_subproject_path\n'), ((4825, 4861), 'mohou.types.MultiEpisodeChunk.load', 'MultiEpisodeChunk.load', (['project_name'], {}), '(project_name)\n', (4847, 4861), False, 'from mohou.types import AngleVector, ElementDict, GripperState, ImageBase, MultiEpisodeChunk, TerminateFlag\n'), ((4972, 5010), 'mohou.trainer.TrainCache.load', 'TrainCache.load', (['project_name', 'ae_type'], {}), '(project_name, ae_type)\n', (4987, 5010), 
False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((5139, 5166), 'mohou.dataset.AutoEncoderDatasetConfig', 'AutoEncoderDatasetConfig', (['(0)'], {}), '(0)\n', (5163, 5166), False, 'from mohou.dataset import AutoEncoderDataset, AutoEncoderDatasetConfig, AutoRegressiveDataset, AutoRegressiveDatasetConfig, WeightPolicy\n'), ((5220, 5283), 'mohou.dataset.AutoEncoderDataset.from_chunk', 'AutoEncoderDataset.from_chunk', (['chunk_intact', 'image_type', 'no_aug'], {}), '(chunk_intact, image_type, no_aug)\n', (5249, 5283), False, 'from mohou.dataset import AutoEncoderDataset, AutoEncoderDatasetConfig, AutoRegressiveDataset, AutoRegressiveDatasetConfig, WeightPolicy\n'), ((5309, 5376), 'mohou.dataset.AutoEncoderDataset.from_chunk', 'AutoEncoderDataset.from_chunk', (['chunk_not_intact', 'image_type', 'no_aug'], {}), '(chunk_not_intact, image_type, no_aug)\n', (5338, 5376), False, 'from mohou.dataset import AutoEncoderDataset, AutoEncoderDatasetConfig, AutoRegressiveDataset, AutoRegressiveDatasetConfig, WeightPolicy\n'), ((6496, 6549), 'mohou.trainer.TrainCache.load', 'TrainCache.load', (['project_name', 'VariationalAutoEncoder'], {}), '(project_name, VariationalAutoEncoder)\n', (6511, 6549), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((6626, 6681), 'mohou.file.get_subproject_path', 'get_subproject_path', (['project_name', '"""autoencoder_result"""'], {}), "(project_name, 'autoencoder_result')\n", (6645, 6681), False, 'from mohou.file import get_project_path, get_subproject_path\n'), ((7134, 7169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': "{'pad': 0}"}), "(tight_layout={'pad': 0})\n", (7144, 7169), True, 'import matplotlib.pyplot as plt\n'), ((7179, 7199), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7190, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7468, 7490), 'mohou.utils.canvas_to_ndarray', 'canvas_to_ndarray', (['fig'], {}), '(fig)\n', (7485, 7490), 
False, 'from mohou.utils import canvas_to_ndarray\n'), ((7670, 7718), 'mohou.file.get_subproject_path', 'get_subproject_path', (['project_name', '"""lstm_result"""'], {}), "(project_name, 'lstm_result')\n", (7689, 7718), False, 'from mohou.file import get_project_path, get_subproject_path\n'), ((7739, 7779), 'mohou.default.load_default_image_encoder', 'load_default_image_encoder', (['project_name'], {}), '(project_name)\n', (7765, 7779), False, 'from mohou.default import load_default_image_encoder\n'), ((1195, 1224), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (1208, 1224), False, 'import time\n'), ((1244, 1274), 'mohou.file.get_project_path', 'get_project_path', (['project_name'], {}), '(project_name)\n', (1260, 1274), False, 'from mohou.file import get_project_path, get_subproject_path\n'), ((2253, 2289), 'mohou.types.MultiEpisodeChunk.load', 'MultiEpisodeChunk.load', (['project_name'], {}), '(project_name)\n', (2275, 2289), False, 'from mohou.types import AngleVector, ElementDict, GripperState, ImageBase, MultiEpisodeChunk, TerminateFlag\n'), ((2440, 2478), 'mohou.trainer.TrainCache.load', 'TrainCache.load', (['project_name', 'ae_type'], {}), '(project_name, ae_type)\n', (2455, 2478), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((2487, 2542), 'mohou.trainer.train', 'train', (['tcache', 'dataset'], {'model': 'None', 'config': 'train_config'}), '(tcache, dataset, model=None, config=train_config)\n', (2492, 2542), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((2570, 2594), 'mohou.trainer.TrainCache', 'TrainCache', (['project_name'], {}), '(project_name)\n', (2580, 2594), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((2688, 2744), 'mohou.trainer.train', 'train', (['tcache', 'dataset'], {'model': 'model', 'config': 'train_config'}), '(tcache, dataset, model=model, config=train_config)\n', (2693, 2744), False, 'from mohou.trainer import TrainCache, 
TrainConfig, train\n'), ((3172, 3208), 'mohou.types.MultiEpisodeChunk.load', 'MultiEpisodeChunk.load', (['project_name'], {}), '(project_name)\n', (3194, 3208), False, 'from mohou.types import AngleVector, ElementDict, GripperState, ImageBase, MultiEpisodeChunk, TerminateFlag\n'), ((3670, 3705), 'mohou.trainer.TrainCache.load', 'TrainCache.load', (['project_name', 'LSTM'], {}), '(project_name, LSTM)\n', (3685, 3705), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((3714, 3769), 'mohou.trainer.train', 'train', (['tcache', 'dataset'], {'model': 'None', 'config': 'train_config'}), '(tcache, dataset, model=None, config=train_config)\n', (3719, 3769), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((3797, 3821), 'mohou.trainer.TrainCache', 'TrainCache', (['project_name'], {}), '(project_name)\n', (3807, 3821), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((3869, 3887), 'mohou.model.LSTM', 'LSTM', (['model_config'], {}), '(model_config)\n', (3873, 3887), False, 'from mohou.model import LSTM, AutoEncoder, AutoEncoderBase, AutoEncoderConfig, LSTMConfig\n'), ((3896, 3952), 'mohou.trainer.train', 'train', (['tcache', 'dataset'], {'model': 'model', 'config': 'train_config'}), '(tcache, dataset, model=model, config=train_config)\n', (3901, 3952), False, 'from mohou.trainer import TrainCache, TrainConfig, train\n'), ((4215, 4252), 're.match', 're.match', (['""".*TrainCache.*"""', 'path.name'], {}), "('.*TrainCache.*', path.name)\n", (4223, 4252), False, 'import re\n'), ((5525, 5546), 'random.shuffle', 'random.shuffle', (['idxes'], {}), '(idxes)\n', (5539, 5546), False, 'import random\n'), ((9495, 9531), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(n_av_dim + n_gs_dim)', '(1)'], {}), '(n_av_dim + n_gs_dim, 1)\n', (9507, 9531), True, 'import matplotlib.pyplot as plt\n'), ((12326, 12369), 'moviepy.editor.ImageSequenceClip', 'ImageSequenceClip', (['images_with_text'], {'fps': '(20)'}), '(images_with_text, 
fps=20)\n', (12343, 12369), False, 'from moviepy.editor import ImageSequenceClip\n'), ((6020, 6038), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (6032, 6038), True, 'import matplotlib.pyplot as plt\n'), ((6232, 6287), 'mohou.file.get_subproject_path', 'get_subproject_path', (['project_name', '"""autoencoder_result"""'], {}), "(project_name, 'autoencoder_result')\n", (6251, 6287), False, 'from mohou.file import get_project_path, get_subproject_path\n'), ((7594, 7630), 'mohou.types.MultiEpisodeChunk.load', 'MultiEpisodeChunk.load', (['project_name'], {}), '(project_name)\n', (7616, 7630), False, 'from mohou.types import AngleVector, ElementDict, GripperState, ImageBase, MultiEpisodeChunk, TerminateFlag\n'), ((8694, 8736), 'mohou.types.ElementDict', 'ElementDict', (['[feed_avs[i], feed_images[i]]'], {}), '([feed_avs[i], feed_images[i]])\n', (8705, 8736), False, 'from mohou.types import AngleVector, ElementDict, GripperState, ImageBase, MultiEpisodeChunk, TerminateFlag\n'), ((10056, 10123), 'numpy.hstack', 'np.hstack', (['(np_av_seq_gt[:, i_av_dim], np_av_seq_pred[:, i_av_dim])'], {}), '((np_av_seq_gt[:, i_av_dim], np_av_seq_pred[:, i_av_dim]))\n', (10065, 10123), True, 'import numpy as np\n'), ((10144, 10156), 'numpy.min', 'np.min', (['conc'], {}), '(conc)\n', (10150, 10156), True, 'import numpy as np\n'), ((10177, 10189), 'numpy.max', 'np.max', (['conc'], {}), '(conc)\n', (10183, 10189), True, 'import numpy as np\n'), ((4420, 4434), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4431, 4434), False, 'import pickle\n'), ((4461, 4475), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4473, 4475), True, 'import matplotlib.pyplot as plt\n'), ((9759, 9777), 'numpy.array', 'np.array', (['pred_avs'], {}), '(pred_avs)\n', (9767, 9777), True, 'import numpy as np\n'), ((10946, 11013), 'numpy.hstack', 'np.hstack', (['(np_gs_seq_gt[:, i_gs_dim], np_gs_seq_pred[:, i_gs_dim])'], {}), '((np_gs_seq_gt[:, i_gs_dim], 
np_gs_seq_pred[:, i_gs_dim]))\n', (10955, 11013), True, 'import numpy as np\n'), ((11038, 11050), 'numpy.min', 'np.min', (['conc'], {}), '(conc)\n', (11044, 11050), True, 'import numpy as np\n'), ((11075, 11087), 'numpy.max', 'np.max', (['conc'], {}), '(conc)\n', (11081, 11087), True, 'import numpy as np\n'), ((10648, 10666), 'numpy.array', 'np.array', (['pred_gss'], {}), '(pred_gss)\n', (10656, 10666), True, 'import numpy as np\n')] |
'''
# Example dictionary containing information on power excess and shortage for the current timestep
# {key:[Qsum,Qchp_nom,Qchp_min,Qboiler_nom,Qboiler_min,Q_tes_in,Q_tes_out]}
dict_Qlhn={'1001': {'Qsum':[10,10],'Qchp_nom':[8,8],'Qchp_min':[5,5],'Qboiler_nom':[2,2],'Qboiler_min':[1,1]},
'1002': {'Qsum':[4,4],'Qchp_nom':[2,2],'Qchp_min':[2,2],'Qboiler_nom':[2,2],'Qboiler_min':[1,1]},
'1003': {'Qsum':[6,6],'Qchp_nom':[3,3],'Qchp_min':[2,2],'Qboiler_nom':[3,3],'Qboiler_min':[1,1]},
'1004': {'Qsum':[-15,-8],'Qchp_nom':[0,0],'Qchp_min':[0,0],'Qboiler_nom':[0,0],'Qboiler_min':[0,0]},
'1005': {'Qsum':[-15,0],'Qchp_nom':[0,0],'Qchp_min':[0,0],'Qboiler_nom':[0,0],'Qboiler_min':[0,0]},
'1006': {'Qsum':[10,10],'Q_CHP':[5,0],'Qchp_min':[1,0],'Qboiler_nom':[5,0],'Qboiler_min':[1,0]}
print(dict_Qlhn.keys())
time=[0,1]
'''
'''
This script determines which buildings supply their thermal excess to the LHN in order to cover the demand of other buildings
connected to the LHN which cannot supply their demand by their own energy Systems.
Qsum is the total amount of energy which can be supplied or must be taken from the LHN. Qsum>0 means supply, Qsum<0 means demand.
input:
dict_Qlhn: dictionary containing information about excess and shortage of each building over time, excess is also divided up in excess
per energysystem
dict_Qlhn={'nodenumber':{'Q_sum':[t1,t2,t3,...],'Qchp_nom':[t1,t2,t3,...],'Qchp_min':[t1,t2,t3,...],'Qboiler_nom':[t1,t2,t3,...],'Qboiler_min':[t1,t2,t3,...]}}
sortingmethod: 'str': 'COSTS' or 'CO2'. Sort supply priority according to costs or CO2 emissions [CHP first, then boiler, then EH]
time: 'list': List containing all the timesteps
LHN_supply_method: 'str', method to set the sequence of activated energy system
'flexible'=Use CHP first, then Boiler
'static' =Use CHP+Boiler
subcity_building_nodes: 'list', list containing all buildingnode interconnected to a subcity
'''
import pycity_calc.toolbox.networks.network_ops as netop
import pycity_calc.toolbox.dimensioning.dim_networks as dim_net
import pycity_calc.simulation.energy_balance_optimization.energy_balance_building as EB2
#import pycity_calc.simulation.energy_balance_optimization.Complex_city_gen as CCG
import pycity_calc.simulation.energy_balance_optimization.test_city as testcity
import matplotlib.pyplot as plt#
import numpy as np
import pickle
import pycity_calc.cities.city as cit
# define invalid Individual Error.
class invalidind2(Exception):
    """Raised when the total possible LHN supply (minus network losses)
    cannot cover the total heat demand at some timestep, i.e. the
    individual/configuration under evaluation is infeasible."""
    pass
# Determine which Buildings are chosen to cover the demand of the LHN
def city_energy_balance(City_Object, dict_Qlhn, sortingmethod='CO2', LHN_supply_method = "flexible", subcity_building_nodes=None):
    """Dispatch the thermal excess of LHN-connected buildings to cover the
    demand of buildings that cannot meet their own load.

    Args:
        City_Object: pycity_calc city object holding buildings and the
            heating-network graph.
        dict_Qlhn (dict): per-building time series of excess/shortage, e.g.
            {'node': {'Qsum': [...], 'Qchp_nom': [...], 'Qchp_min': [...],
                      'Qboiler_nom': [...], 'Qboiler_min': [...]}}.
            Qsum > 0 means supply to the LHN, Qsum < 0 means demand from it.
        sortingmethod (str): 'CO2' (implemented: CHP suppliers sorted by
            Qchp_nom, then boiler suppliers by Qboiler_nom) or 'COSTS' (TODO).
        LHN_supply_method (str): 'flexible' = use CHP first, then boiler;
            'static' is not implemented yet.
        subcity_building_nodes (list): building nodes of one subcity; the
            first entry seeds the LHN-connectivity search.

    Returns:
        tuple: (City_Object with updated boiler/CHP results and per-node
            'fuel demand' / 'power_el_chp' entries,
            dict_supply mapping node -> {'Qchp_nom', 'Qboiler_nom'} arrays
            of the LHN supply assigned to that building per timestep).

    Raises:
        invalidind2: if at any timestep total supply minus network losses
            cannot cover total demand (tolerance -0.001).
    """
    ################
    #calc lhn losses
    #create empty graph
    lhn_graph = cit.City(environment=City_Object.environment)
    #get list of lhn connected nodes
    lhn_con_nodes = netop.get_list_with_energy_net_con_node_ids(City_Object,search_node=subcity_building_nodes[0], network_type='heating')
    list_n_lhn = []
    # Add building nodes of nodelist to lhn_graph
    for n in lhn_con_nodes:
        if 'node_type' in City_Object.nodes[n]:
            if City_Object.nodes[n]['node_type'] == 'heating' or City_Object.nodes[n]['node_type'] == 'building':
                curr_pos = City_Object.nodes[n]['position']
                # Add nodes to city_copy
                lhn_graph.add_node(n, position=curr_pos, node_type='heating')
                list_n_lhn.append(n)
    # Add all edges of type heating to lhn_graph
    for u, v in City_Object.edges():
        if u in lhn_con_nodes and v in lhn_con_nodes:
            if 'network_type' in City_Object.edges[u, v]:
                if City_Object.edges[u, v]['network_type'] == 'heating' or City_Object.edges[u, v]['network_type'] == 'heating_and_deg':
                    # Add street edge to street_graph
                    lhn_graph.add_edge(u, v, network_type='heating')
                    # NOTE(review): temp_vl/temp_rl/d_i keep the values of the
                    # LAST heating edge seen — assumes a homogeneous network.
                    temp_vl = City_Object.edges[u, v]['temp_vl']
                    temp_rl = City_Object.edges[u, v]['temp_rl']
                    d_i = City_Object.edges[u, v]['d_i']
                    #TODO: find better way to get temp and di
    #Add nodelist street
    lhn_graph.nodelist_street = list_n_lhn
    #add weights to edges
    netop.add_weights_to_edges(lhn_graph)
    #calc total length of LHN network
    length = netop.sum_up_weights_of_edges(lhn_graph, network_type='heating')
    #print('LHN networklength in m:', length)
    # calc total power losses
    u_pipe = dim_net.estimate_u_value(d_i)
    temp_environment = City_Object.environment.temp_ground
    Q_dot_loss = dim_net.calc_pipe_power_loss(length, u_pipe, temp_vl, temp_rl, temp_environment)
    #print('LHN losses in W:', Q_dot_loss)
    ################
    # save number of timesteps
    timesteps=City_Object.environment.timer.timestepsTotal
    # Create dict to store results
    dict_supply={}
    for node in dict_Qlhn.keys():
        dict_supply.update({node: {'Qchp_nom': np.zeros(timesteps), 'Qboiler_nom': np.zeros(timesteps)}})
    # useful for debugging. All print statements are stored to .txt file
    #import sys
    #filename = open("results.txt", 'w')
    #sys.stdout = filename
    # Loop over all timesteps
    for t in range(timesteps):
        ####print("############timestep", t, "###############")
        # Check if the demand can be covered by the supply
        Qsum_total = 0 # initialization
        demand_at_timestep = False
        for Qsum_i in dict_Qlhn.items():
            Qsum_total = Qsum_total+Qsum_i[1]['Qsum'][t] # sum all Qsum
            if Qsum_i[1]['Qsum'][t] < 0:
                demand_at_timestep = True
        #only if there is a demand add the lhn losses
        if demand_at_timestep:
            Qsum_total = Qsum_total - Q_dot_loss
            #TODO might want to add a factor
        #####if Qsum_total >= 0:
        ##### print('enough supply\nQsum_total:', Qsum_total)
        if Qsum_total < -0.001:
            print('not enough supply\nQsum_total:', Qsum_total)
            raise invalidind2 # raise the invalid Individual Error
        # Create dictionaries containing only suppliers or only demanders
        supplier_nodes_CHP_B = {} # initialization
        supplier_nodes_boiler = {} # initialization
        consumer_nodes = {} # initialization
        for Qsum_i in dict_Qlhn.items(): # loop over all building nodes
            if Qsum_i[1]['Qsum'][t] > 0: # Qsum>0->supply
                subdict_CHP_B = {}
                subdict_boiler = {}
                if Qsum_i[1]['Qchp_nom'][t] > 0: # Supply from CHP and Boiler
                    for subitem in Qsum_i[1].items():
                        subdict_CHP_B.update({subitem[0]: subitem[1][t]})
                    supplier_nodes_CHP_B.update({Qsum_i[0]: subdict_CHP_B})
                elif Qsum_i[1]['Qchp_nom'][t] == 0 and Qsum_i[1]['Qeh_nom'][t] == 0: # Supply from boiler
                    for subitem in Qsum_i[1].items():
                        subdict_boiler.update({subitem[0]: subitem[1][t]})
                    supplier_nodes_boiler.update({Qsum_i[0]: subdict_boiler})
            elif Qsum_i[1]['Qsum'][t] < 0: # Qsum<0->demand
                subdict = {}
                for subitem in Qsum_i[1].items():
                    subdict.update({subitem[0]: subitem[1][t]})
                consumer_nodes.update({Qsum_i[0]: subdict})
        supplier_nodes = {**supplier_nodes_CHP_B,**supplier_nodes_boiler} # merge to a general supply dict
        #print("supplier_nodes with CHP and B:", supplier_nodes_CHP_B.keys())
        #print("supplier_nodes with EH:", supplier_nodes_EH.keys())
        ####print("supplier_nodes:", supplier_nodes)
        ####print("consumer_nodes:", consumer_nodes)
        # Create a list which only contains building nodes('str'), sorted according to a sortingmethod('COSTS', 'CO2')
        list_supplier_priority_CHP_B = [] # initialization
        list_supplier_priority_boiler = [] # initialization
        list_supplier_priority = [] # initialization
        if sortingmethod == 'CO2':
            list_supplier_CHP_B_priority_temp = sorted(supplier_nodes_CHP_B.items(), key=lambda x: x[1]['Qchp_nom'], reverse=True) #return a list, with items sorted by Qchp_nom
            for item in list_supplier_CHP_B_priority_temp:
                list_supplier_priority_CHP_B.append(item[0])
            list_supplier_boiler_priority_temp = sorted(supplier_nodes_boiler.items(), key=lambda x: x[1]['Qboiler_nom'], reverse=True) # return a list, with items sorted by boiler_nom
            for item in list_supplier_boiler_priority_temp:
                list_supplier_priority_boiler.append(item[0])
            list_supplier_priority=list_supplier_priority_CHP_B+list_supplier_priority_boiler
        elif sortingmethod == 'COSTS':
            list_supplier_priority = []
            # TODO: implement, maybe a second dict_energysource_prices is needed
        ####print("list_provider_priority:", list_supplier_priority)
        # From the supply priority list define buildings that actual supply.
        # Also calculate the actual amount to be supplied by each building
        # calculate total demand
        Q_demand = 0 # initialization
        for Qsum_i in consumer_nodes.items():
            Q_demand = Q_demand+abs(Qsum_i[1]['Qsum'])
        # if the LHN is used the thermal losses are added to demand
        # this method neglects the cooling and reheating process of the lhn.
        if Q_demand > 0:
            Q_demand = Q_demand + Q_dot_loss
            #TODO: good way??
            #TODO might want to add a factor
        ####print('demand: ',Q_demand)
        Q_supply_sum = 0 # initialization
        Q_supply_sum_min = 0 # initialization
        # determine suppliers: First CHP, then Boiler, then EH
        if LHN_supply_method == 'flexible':
            supply = True # as long as supply is true, the supply is not covered by the buildings selected so far
            CHP_partload = False # boolean to enter the "minimize the remainder by CHP partload" loop
            B_partload = False # boolean to enter the "minimize the remainder by B partload" loop
            #EH_partload = False # boolean to enter the "minimize the remainder by EH partload" loop
            # go through suppliers list, determine which supplier
            # has to actually supply. Stop when enough supply summed up
            # to cover the demand
            ##############################
            # try to cover demand with CHP
            ##############################
            # sum up maximum CHP supply building by building
            # NOTE: `node` intentionally keeps the last selected supplier after
            # the loop — the partload block below adjusts exactly that supplier.
            for node in list_supplier_priority:
                if Q_supply_sum < Q_demand:
                    Q_supply_max = supplier_nodes[node]['Qchp_nom']
                    Q_supply_sum = Q_supply_sum+Q_supply_max
                    Q_supply_min = supplier_nodes[node]['Qchp_min']
                    Q_supply_sum_min = Q_supply_sum_min+Q_supply_min
                    dict_supply[node]['Qchp_nom'][t] = supplier_nodes[node]['Qchp_nom']
                    # If all demand can be covered by the CHP
                    if Q_supply_sum >= Q_demand:
                        supply = False # demand is covered, no more suppliers are necessary
                        CHP_partload = True # enter the partload loop to fit supply to demand
                        break
            # try to exactly fit supply to demand:
            # reduce supply by switching last selected supplier's CHP to partload to reduce remainder
            if CHP_partload:
                if Q_supply_sum != Q_demand: # only enter when demand is not exactly met
                    remainder = Q_supply_sum-Q_demand # Energy that would be wasted if all system ran in nominal condition
                    possible_reduction = supplier_nodes[node]['Qchp_nom']-supplier_nodes[node]['Qchp_min'] # possible reduction by switching to min_load
                    ####print('remainder:', remainder)
                    ####print('possible_reduction: ',possible_reduction, 'at node:', node)
                    if remainder < possible_reduction:
                        dict_supply[node]['Qchp_nom'][t] = possible_reduction-remainder+supplier_nodes[node]['Qchp_min']
                        ####print('last suppliers Q_CHP in partload')
                    if remainder >= possible_reduction:
                        dict_supply[node]['Qchp_nom'][t] = supplier_nodes[node]['Qchp_nom'] - remainder
                        ####print('last suppliers Q_CHP in min_load')
            ############################################
            # try to cover remaining demand with Boiler
            ############################################
            if supply:
                # sum up maximum boiler supply building by building
                for node in list_supplier_priority:
                    if Q_supply_sum < Q_demand:
                        Q_supply_max = supplier_nodes[node]['Qboiler_nom']
                        Q_supply_sum = Q_supply_sum+Q_supply_max
                        Q_supply_min = supplier_nodes[node]['Qboiler_min']
                        Q_supply_sum_min = Q_supply_sum_min+Q_supply_min
                        dict_supply[node]['Qboiler_nom'][t] = supplier_nodes[node]['Qboiler_nom']
                        # If all demand can be covered by including boilers
                        if Q_supply_sum >= Q_demand:
                            supply = False # demand is covered, no more suppliers are necessary
                            B_partload = True # enter the partload loop to fit supply to demand
                            break
            # try to exactly fit supply to demand:
            # reduce supply by switching last selected supplier's Boiler to partload to reduce remainder
            if B_partload:
                if Q_supply_sum != Q_demand: # only enter when demand is not exactly met
                    remainder = Q_supply_sum-Q_demand # Energy that would be wasted if all system ran in nominal condition
                    possible_reduction = supplier_nodes[node]['Qboiler_nom']-supplier_nodes[node]['Qboiler_min'] # possible reduction by switching to min_load
                    ####print('remainder:', remainder)
                    ####print('possible_reduction: ',possible_reduction, 'at node:', node)
                    if remainder < possible_reduction:
                        dict_supply[node]['Qboiler_nom'][t] = possible_reduction-remainder+supplier_nodes[node]['Qboiler_min']
                        ####print('last suppliers B in partload')
                    if remainder >= possible_reduction:
                        dict_supply[node]['Qboiler_nom'][t] = supplier_nodes[node]['Qboiler_nom'] - remainder
                        ####print('last suppliers B in min_load')
            #assert supply == False, ("Demand cannot be covered by LHN")
            '''
            #######################################
            # try to cover remaining demand with EH
            #######################################
            if supply:
                # sum up maximum EH supply building by building
                for node in list_supplier_priority:
                    Q_supply = supplier_nodes[node]['Qeh_nom']
                    Q_supply_sum = Q_supply_sum+Q_supply
                    # Q_supply_min = supplier_nodes[node]['Q_EH_min']
                    # Q_supply_sum_min = Q_supply_sum_min+Q_supply_min
                    dict_supply[node]['Qeh_nom'][t] = supplier_nodes[node]['Qeh_nom']
                    # If all demand can be covered by the EH
                    if Q_supply_sum >= Q_demand:
                        # EH_partload = True
                        break
            '''
            '''
            # try to exactly fit supply to demand:
            # reduce supply by switching last selected supplier's EH to partload to reduce remainder
            if EH_partload:
                if Q_supply_sum != Q_demand:
                    remainder = Q_supply_sum-Q_demand # Energy that would be wasted if all system ran in nominal condition
                    possible_reduction = supplier_nodes[node]['Qeh_nom']-supplier_nodes[node]['Q_EH_min'] # possible reduction by switching to min_load
                    print('remainder:', remainder)
                    print('possible_reduction: ',possible_reduction, 'at node:', node)
                    if remainder<possible_reduction:
                        dict_supply[node]['Qeh_nom'][t] = remainder
                        print('last suppliers EH in partload')
                    if remainder>=possible_reduction:
                        dict_supply[node]['Qeh_nom'][t] = supplier_nodes_EH[node]['Q_EH_min']
                        print('last suppliers EH in min_load')
            '''
            ####for node in dict_supply.keys():
                ####print('QCHP',node,': ', dict_supply[node]['Qchp_nom'][t], '\n',
                      ####'Qboiler',node,': ', dict_supply[node]['Qboiler_nom'][t])
        elif LHN_supply_method == 'static':
            #do something
            A=1
            #TODO: implement: CHP and Boiler always operate together
        ####for key in dict_supply.keys():
            ####print("dict_supply_Qboiler:",key,"\n",dict_supply[key]['Qboiler_nom'],'\n')
            ####print("dict_supply_Qchp:",key,"\n",dict_supply[key]['Qchp_nom'],'\n')
        #print("dict_supply_t:\n",dict_supply['1001'],'\n',dict_supply['1002'],'\n',dict_supply['1003'],'\n',dict_supply['1004'],'\n',dict_supply['1005'],'\n',dict_supply['1006'],'\n')
        #print("###########################\n timestep", t, " done \n###########################")
    #def add_LHN_results_to_city_object(dict_supply, City_Object, timesteps):
    # Write the dispatched LHN supply back into the building energy systems.
    for node in dict_supply.keys(): # loop over all buildings with LHN
        Bes = City_Object.nodes[int(node)]['entity'].bes # get Bes from City_Object
        for t in range(timesteps): #timesteps
            if dict_supply[node]['Qboiler_nom'][t] != 0:
                # Add the LHN supply amount to the current stored value
                boiler_supply=dict_supply[str(node)]['Qboiler_nom'][t]+City_Object.nodes[int(node)]['heat_demand_for_boiler'][t]
                if boiler_supply<Bes.boiler.qNominal*Bes.boiler.lowerActivationLimit:
                    #if the needed amount is below LAL run in LAL and waste rest of the energy
                    boiler_supply=Bes.boiler.qNominal*Bes.boiler.lowerActivationLimit
                power_boiler, power_boiler_in = Bes.boiler.calc_boiler_all_results(boiler_supply, t)
            if dict_supply[node]['Qchp_nom'][t] != 0:
                # Add the LHN supply amount to the current stored value
                chp_supply=dict_supply[str(node)]['Qchp_nom'][t]+City_Object.nodes[int(node)]['heat_demand_for_chp'][t]
                boiler_supply = dict_supply[str(node)]['Qboiler_nom'][t] + City_Object.nodes[int(node)]['heat_demand_for_boiler'][t]
                if chp_supply<Bes.chp.qNominal*Bes.chp.lowerActivationLimit:
                    #if the needed amount is below CHP LAL try to shift demand to boiler
                    if chp_supply + boiler_supply <= Bes.boiler.qNominal and chp_supply + boiler_supply > Bes.boiler.qNominal*Bes.boiler.lowerActivationLimit:
                        # boiler can supply chp_supply in addition to his load
                        boiler_supply += chp_supply
                        chp_supply = 0
                        power_boiler, power_boiler_in = Bes.boiler.calc_boiler_all_results(boiler_supply, t)
                    elif chp_supply + boiler_supply <= Bes.boiler.qNominal*Bes.boiler.lowerActivationLimit:
                        # if boiler load + chp_supply are below boiler LAL, run boiler at LAL
                        boiler_supply = Bes.boiler.qNominal*Bes.boiler.lowerActivationLimit
                        chp_supply = 0
                        power_boiler, power_boiler_in = Bes.boiler.calc_boiler_all_results(boiler_supply, t)
                    else:
                        # boiler can not cover the additional load, CHP must run at partload
                        chp_supply=Bes.chp.qNominal*Bes.chp.lowerActivationLimit
                (power_thermal_chp, power_electrical_chp, fuel_power_in_chp)=Bes.chp.th_op_calc_all_results(chp_supply, t)
        if Bes.hasChp and Bes.hasBoiler:
            City_Object.nodes[int(node)]['fuel demand'] = Bes.chp.array_fuel_power+Bes.boiler.array_fuel_power
            City_Object.nodes[int(node)]['power_el_chp'] = Bes.chp.totalPOutput
        elif Bes.hasChp == False and Bes.hasBoiler:
            City_Object.nodes[int(node)]['fuel demand'] = Bes.boiler.array_fuel_power
    return City_Object, dict_supply
if __name__ == '__main__':
    # Demo run: build a six-building test city (mix of HP and CHP energy
    # systems), run the energy-balance calculation for every subcity, then
    # pickle the resulting city object and LHN supply dict.
    city_object=testcity.run_city_generator(list_types=['HP','CHP','CHP','HP','CHP','CHP'],year = 2010,
                                            timestep = 3600,
                                            livingArea=[120,130,140,120,130,140],
                                            b_Space_heat_demand=True,
                                            specificDemandSH=[100,110,120,100,110,120],
                                            annualDemandel=[3000, 3000, 3000,3000, 3000, 3000],
                                            profileType=['H0','H0','H0','H0','H0','H0'],
                                            methodel=[1,1,1,1,1,1],
                                            b_domestic_hot_water=False,
                                            b_el_demand=True,
                                            roof_usabl_pv_area=[30, 30, 30,30, 30, 30],
                                            boiler_q_nominal=[3000, 5500, 6000,3000, 5500, 6000],
                                            boiler_eta=[0.9, 0.9, 0.9,0.9, 0.9, 0.9],
                                            boiler_lal=[0.5, 0.5, 0.5,0.5, 0.5, 0.5],
                                            tes_capacity=[700, 700, 700,700, 700, 700],
                                            tes_k_loss=[0, 0, 0,0,0,0],
                                            tes_t_max=[95, 95, 95,95, 95, 95],
                                            eh_q_nominal=[4000, 4000, 4000,4000, 4000, 4000],
                                            hp_q_nominal=[7000, 7000, 7000,7000, 7000, 7000],
                                            hp_lal=[0.5, 0.5, 0.5,0.5, 0.5, 0.5],
                                            chp_p_nominal=[1500, 2000, 2000,1500, 2000, 2000],
                                            chp_q_nominal=[4000, 5000, 5000,4000, 5000, 5000],
                                            chp_eta_total=[0.9, 0.9, 0.9,0.9, 0.9, 0.9],
                                            chp_lal=[0.5, 0.5, 0.5,0.5, 0.5, 0.5],
                                            PVarea=[30,0,0,30,0,0],
                                            bat_capacity=[100000,0,0,100000,0,0],
                                            list_etaCharge=[0.96, 0.96, 0.96,0.96, 0.96, 0.96],
                                            list_etaDischarge=[0.95, 0.95, 0.95,0.95, 0.95, 0.95]
                                            )
    Calculator=EB2.calculator(city_object)
    dict_bes_data=Calculator.assembler()
    ####print('Dict city data', dict_bes_data)
    # Run the balance once per subcity; the final iteration's results are kept.
    for i in range(len(dict_bes_data)):
        city_object, dict_Qlhn, dict_supply = Calculator.eb_balances(dict_bes_data,i)
    #save all results
    with open('all_results.pkl', 'wb') as output:
        pickle.dump(city_object, output, pickle.HIGHEST_PROTOCOL)
        pickle.dump(dict_supply, output, pickle.HIGHEST_PROTOCOL)
    ####print('line just for Breakpoint')
| [
"pickle.dump",
"pycity_calc.toolbox.dimensioning.dim_networks.estimate_u_value",
"pycity_calc.toolbox.networks.network_ops.get_list_with_energy_net_con_node_ids",
"pycity_calc.toolbox.dimensioning.dim_networks.calc_pipe_power_loss",
"numpy.zeros",
"pycity_calc.simulation.energy_balance_optimization.energy... | [((2919, 2964), 'pycity_calc.cities.city.City', 'cit.City', ([], {'environment': 'City_Object.environment'}), '(environment=City_Object.environment)\n', (2927, 2964), True, 'import pycity_calc.cities.city as cit\n'), ((3022, 3146), 'pycity_calc.toolbox.networks.network_ops.get_list_with_energy_net_con_node_ids', 'netop.get_list_with_energy_net_con_node_ids', (['City_Object'], {'search_node': 'subcity_building_nodes[0]', 'network_type': '"""heating"""'}), "(City_Object, search_node=\n subcity_building_nodes[0], network_type='heating')\n", (3065, 3146), True, 'import pycity_calc.toolbox.networks.network_ops as netop\n'), ((4430, 4467), 'pycity_calc.toolbox.networks.network_ops.add_weights_to_edges', 'netop.add_weights_to_edges', (['lhn_graph'], {}), '(lhn_graph)\n', (4456, 4467), True, 'import pycity_calc.toolbox.networks.network_ops as netop\n'), ((4519, 4583), 'pycity_calc.toolbox.networks.network_ops.sum_up_weights_of_edges', 'netop.sum_up_weights_of_edges', (['lhn_graph'], {'network_type': '"""heating"""'}), "(lhn_graph, network_type='heating')\n", (4548, 4583), True, 'import pycity_calc.toolbox.networks.network_ops as netop\n'), ((4674, 4703), 'pycity_calc.toolbox.dimensioning.dim_networks.estimate_u_value', 'dim_net.estimate_u_value', (['d_i'], {}), '(d_i)\n', (4698, 4703), True, 'import pycity_calc.toolbox.dimensioning.dim_networks as dim_net\n'), ((4780, 4865), 'pycity_calc.toolbox.dimensioning.dim_networks.calc_pipe_power_loss', 'dim_net.calc_pipe_power_loss', (['length', 'u_pipe', 'temp_vl', 'temp_rl', 'temp_environment'], {}), '(length, u_pipe, temp_vl, temp_rl, temp_environment\n )\n', (4808, 4865), True, 'import pycity_calc.toolbox.dimensioning.dim_networks as dim_net\n'), ((21157, 22446), 'pycity_calc.simulation.energy_balance_optimization.test_city.run_city_generator', 'testcity.run_city_generator', ([], {'list_types': "['HP', 'CHP', 'CHP', 'HP', 'CHP', 'CHP']", 'year': '(2010)', 
'timestep': '(3600)', 'livingArea': '[120, 130, 140, 120, 130, 140]', 'b_Space_heat_demand': '(True)', 'specificDemandSH': '[100, 110, 120, 100, 110, 120]', 'annualDemandel': '[3000, 3000, 3000, 3000, 3000, 3000]', 'profileType': "['H0', 'H0', 'H0', 'H0', 'H0', 'H0']", 'methodel': '[1, 1, 1, 1, 1, 1]', 'b_domestic_hot_water': '(False)', 'b_el_demand': '(True)', 'roof_usabl_pv_area': '[30, 30, 30, 30, 30, 30]', 'boiler_q_nominal': '[3000, 5500, 6000, 3000, 5500, 6000]', 'boiler_eta': '[0.9, 0.9, 0.9, 0.9, 0.9, 0.9]', 'boiler_lal': '[0.5, 0.5, 0.5, 0.5, 0.5, 0.5]', 'tes_capacity': '[700, 700, 700, 700, 700, 700]', 'tes_k_loss': '[0, 0, 0, 0, 0, 0]', 'tes_t_max': '[95, 95, 95, 95, 95, 95]', 'eh_q_nominal': '[4000, 4000, 4000, 4000, 4000, 4000]', 'hp_q_nominal': '[7000, 7000, 7000, 7000, 7000, 7000]', 'hp_lal': '[0.5, 0.5, 0.5, 0.5, 0.5, 0.5]', 'chp_p_nominal': '[1500, 2000, 2000, 1500, 2000, 2000]', 'chp_q_nominal': '[4000, 5000, 5000, 4000, 5000, 5000]', 'chp_eta_total': '[0.9, 0.9, 0.9, 0.9, 0.9, 0.9]', 'chp_lal': '[0.5, 0.5, 0.5, 0.5, 0.5, 0.5]', 'PVarea': '[30, 0, 0, 30, 0, 0]', 'bat_capacity': '[100000, 0, 0, 100000, 0, 0]', 'list_etaCharge': '[0.96, 0.96, 0.96, 0.96, 0.96, 0.96]', 'list_etaDischarge': '[0.95, 0.95, 0.95, 0.95, 0.95, 0.95]'}), "(list_types=['HP', 'CHP', 'CHP', 'HP', 'CHP',\n 'CHP'], year=2010, timestep=3600, livingArea=[120, 130, 140, 120, 130, \n 140], b_Space_heat_demand=True, specificDemandSH=[100, 110, 120, 100, \n 110, 120], annualDemandel=[3000, 3000, 3000, 3000, 3000, 3000],\n profileType=['H0', 'H0', 'H0', 'H0', 'H0', 'H0'], methodel=[1, 1, 1, 1,\n 1, 1], b_domestic_hot_water=False, b_el_demand=True, roof_usabl_pv_area\n =[30, 30, 30, 30, 30, 30], boiler_q_nominal=[3000, 5500, 6000, 3000, \n 5500, 6000], boiler_eta=[0.9, 0.9, 0.9, 0.9, 0.9, 0.9], boiler_lal=[0.5,\n 0.5, 0.5, 0.5, 0.5, 0.5], tes_capacity=[700, 700, 700, 700, 700, 700],\n tes_k_loss=[0, 0, 0, 0, 0, 0], tes_t_max=[95, 95, 95, 95, 95, 95],\n eh_q_nominal=[4000, 4000, 4000, 
4000, 4000, 4000], hp_q_nominal=[7000, \n 7000, 7000, 7000, 7000, 7000], hp_lal=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n chp_p_nominal=[1500, 2000, 2000, 1500, 2000, 2000], chp_q_nominal=[4000,\n 5000, 5000, 4000, 5000, 5000], chp_eta_total=[0.9, 0.9, 0.9, 0.9, 0.9, \n 0.9], chp_lal=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5], PVarea=[30, 0, 0, 30, 0, \n 0], bat_capacity=[100000, 0, 0, 100000, 0, 0], list_etaCharge=[0.96, \n 0.96, 0.96, 0.96, 0.96, 0.96], list_etaDischarge=[0.95, 0.95, 0.95, \n 0.95, 0.95, 0.95])\n", (21184, 22446), True, 'import pycity_calc.simulation.energy_balance_optimization.test_city as testcity\n'), ((22783, 22810), 'pycity_calc.simulation.energy_balance_optimization.energy_balance_building.calculator', 'EB2.calculator', (['city_object'], {}), '(city_object)\n', (22797, 22810), True, 'import pycity_calc.simulation.energy_balance_optimization.energy_balance_building as EB2\n'), ((23106, 23163), 'pickle.dump', 'pickle.dump', (['city_object', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(city_object, output, pickle.HIGHEST_PROTOCOL)\n', (23117, 23163), False, 'import pickle\n'), ((23172, 23229), 'pickle.dump', 'pickle.dump', (['dict_supply', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(dict_supply, output, pickle.HIGHEST_PROTOCOL)\n', (23183, 23229), False, 'import pickle\n'), ((5151, 5170), 'numpy.zeros', 'np.zeros', (['timesteps'], {}), '(timesteps)\n', (5159, 5170), True, 'import numpy as np\n'), ((5187, 5206), 'numpy.zeros', 'np.zeros', (['timesteps'], {}), '(timesteps)\n', (5195, 5206), True, 'import numpy as np\n')] |
import numpy as np
from base64 import b64decode
from json import loads
import matplotlib.pyplot as plt
""" algorithm to classify unlabeled data into K classes using K_means_clustering algorithm
devloper -> <NAME>
bilding the K means clusturing machine learning model from scrach
"""
class k_mean_cluster:
def __init__(self):
"""default constructor"""
pass
def fit(self,data):
"""input your training dataset to model"""
self.clusters=data #store our training data set as a 2D Tensor(matrix) features in rows and samples in columns
def eulidian_norm(self,x,y):
"""calculate distance between two n dimention points in eulidian space"""
return np.sqrt(np.dot((x-y)**2,np.ones(x.shape[0]))) #returns the scalar magnitude of distance
def asign_centeroids(self):
"""assign nearest centeroid to each of the datapoints as we assign it to centeroids cluster"""
for i,cluster in enumerate(self.clusters): #iterate through each data points
distance=np.zeros((self.k,1)) #vector to represent distance of each point from every centeroid
for j,centroid in enumerate(self.centroids): #itereate through every centeroid points
distance[j]=self.eulidian_norm(cluster,centroid) #calculate their distance wrt the the current data point
minm=np.argmin(distance) #select the closest of the all centeroid
self.cluster_index_list[minm]=self.cluster_index_list[minm]+[i] #assign the datapoint to the closet centeroid's cluster
def center_of_mass(self,cluster_list):
"""to calculate mean vector for position of centeroid among its cluster or at its center of mass"""
ndpoint=np.zeros(self.centroids.shape[1]) #to store the mean position vector of a group of cluster
for i in cluster_list: #iterate through all the points in the the cluster of the centeroid
ndpoint=ndpoint+self.clusters[i] #vector sumation of all their positions
return ndpoint/len(cluster_list) #return their arithmetic mean
def move_centroids(self):
"""to calculate mean vector for position of centeroid among its cluster or at its center of mass"""
for i,cluster_list in enumerate(self.cluster_index_list): #iterate through all centeroid points
if not len(cluster_list)==0: #if the centeroid has a cluster of datapoints
self.centroids[i] = self.center_of_mass(cluster_list) #update the centeroid location to the center of mass
def initial_cent(self):
"""to assign initial guessed (not random) position to centeroids and expected cluster to datapoints
this boosts the performance and reduces overall iteration in most cases"""
dist={} #dictionary to store datapoint index(key) vs distance from origin(value)
origin=np.zeros(self.centroids.shape[1]) #cordinate of origin (zero)
for i,cluster in enumerate(self.clusters): #iterate through each data points
dist[i]=self.eulidian_norm(origin,cluster) #compute and store their distance from origin
itr=0 #monitor iteration
for key, value in sorted(dist.items(), key=lambda x: x[1]): #iterate through sorted dictionary of distances(values)
j=itr*self.k//self.clusters.shape[0] #distribute ito groups and assign to each cluster centeroid
self.cluster_index_list[j]=self.cluster_index_list[j]+[key] #assign datapoints to rspective clustering centeroid
itr=itr+1 #update counter
self.move_centroids() #wemove the centeroids to the mean position of the assigned cluste
def train(self, k, maxitr=30):
    """Fit the model, producing *k* clusters of the stored data points.

    Args:
        k: number of clusters to form.
        maxitr: iteration budget; training stops earlier on convergence.

    Returns:
        (centroids, cluster_index_list, iterations): final centroid positions,
        per-centroid lists of assigned point indices, and iterations used.
    """
    self.k = k
    self.centroids = np.zeros((k, self.clusters.shape[1]), dtype=float)
    self.cluster_index_list = [[]] * k  # per-centroid member indices
    self.initial_cent()  # deterministic initial guess
    previous = None  # previous assignment, for convergence detection
    iteration = 0
    # iterate until the assignment stops changing or the budget runs out
    while iteration < maxitr and previous != self.cluster_index_list:
        previous = list(self.cluster_index_list)
        self.cluster_index_list = [[]] * k  # fresh assignment lists
        self.asign_centeroids()  # assign every point to its nearest centroid
        self.move_centroids()    # recenter the centroids on their clusters
        iteration += 1
    return self.centroids, self.cluster_index_list, iteration
def predict(self, point):
    """Return the index of the cluster centroid nearest to *point*.

    Bug fix: the original computed the distance from an undefined name
    ``cluster`` (a NameError at runtime) instead of the ``point`` argument.

    Args:
        point: a single data point (same dimensionality as the centroids).

    Returns:
        Index of the nearest centroid, i.e. the predicted cluster.
    """
    distance = np.zeros((self.k, 1))
    for j, centroid in enumerate(self.centroids):
        # distance of the query point to each learned centroid
        distance[j] = self.eulidian_norm(point, centroid)
    class_index = np.argmin(distance)  # nearest centroid wins
    return class_index
def parse(x):
    """Decode one JSON line of the base64-encoded MNIST dump into a float64 pixel array.

    Args:
        x: a JSON string with a "data" key holding base64-encoded raw bytes.

    Returns:
        1D numpy array of float64 pixel values (one byte per pixel).
    """
    record = loads(x)
    # np.frombuffer replaces the deprecated np.fromstring for binary input;
    # astype() below copies, so the read-only buffer view is never mutated
    array = np.frombuffer(b64decode(record["data"]), dtype=np.ubyte)
    array = array.astype(np.float64)
    return array
digits=[] # every image will be collected here, each as a flat 784-length float array
with open("digits.base64.json","r") as f: # MNIST handwritten-digit dump, one base64 JSON record per line
    for s in f.readlines(): # each line is one image
        digits.append(parse(s)) # decode to a 28*28 = 784 element pixel vector
digits=np.array(digits) # stack into a 2D array of shape (n_images, 784)
def display_digit(digit, k):
    """Show the images of *digit* whose indices are listed in *k* on one grid figure.

    Args:
        digit: 2D array of flattened 28x28 images (one image per row).
        k: iterable of row indices to display.
    """
    rows = int(np.sqrt(len(k)))      # grid height
    cols = len(k) // rows + 1         # grid width, enough cells for all images
    plt.figure()
    for cell, image_idx in enumerate(k):
        plt.subplot(rows, cols, cell + 1)
        handle = plt.imshow(digit[image_idx].reshape(28, 28))  # flat 784 -> 28x28
        handle.set_cmap('gray_r')
    plt.show()
kmc=k_mean_cluster() # clustering model instance
kmc.fit(digits[:10000]) # train on the first 10,000 of the 60,000 images to keep training fast
c,cil,itr=kmc.train(10) # form 10 clusters, one per digit class in MNIST
display_digit(digits,cil[0][:20]) # show 20 digits that fell into the first cluster
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"json.loads",
"numpy.zeros",
"numpy.ones",
"numpy.argmin",
"base64.b64decode",
"matplotlib.pyplot.figure",
"numpy.array"
] | [((7205, 7221), 'numpy.array', 'np.array', (['digits'], {}), '(digits)\n', (7213, 7221), True, 'import numpy as np\n'), ((6598, 6606), 'json.loads', 'loads', (['x'], {}), '(x)\n', (6603, 6606), False, 'from json import loads\n'), ((7646, 7658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7656, 7658), True, 'import matplotlib.pyplot as plt\n'), ((8075, 8085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8083, 8085), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1985), 'numpy.zeros', 'np.zeros', (['self.centroids.shape[1]'], {}), '(self.centroids.shape[1])\n', (1960, 1985), True, 'import numpy as np\n'), ((3321, 3354), 'numpy.zeros', 'np.zeros', (['self.centroids.shape[1]'], {}), '(self.centroids.shape[1])\n', (3329, 3354), True, 'import numpy as np\n'), ((4602, 4652), 'numpy.zeros', 'np.zeros', (['(k, self.clusters.shape[1])'], {'dtype': 'float'}), '((k, self.clusters.shape[1]), dtype=float)\n', (4610, 4652), True, 'import numpy as np\n'), ((6074, 6095), 'numpy.zeros', 'np.zeros', (['(self.k, 1)'], {}), '((self.k, 1))\n', (6082, 6095), True, 'import numpy as np\n'), ((6352, 6371), 'numpy.argmin', 'np.argmin', (['distance'], {}), '(distance)\n', (6361, 6371), True, 'import numpy as np\n'), ((6634, 6658), 'base64.b64decode', 'b64decode', (["digit['data']"], {}), "(digit['data'])\n", (6643, 6658), False, 'from base64 import b64decode\n'), ((7757, 7781), 'matplotlib.pyplot.subplot', 'plt.subplot', (['m', 'n', '(i + 1)'], {}), '(m, n, i + 1)\n', (7768, 7781), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1192), 'numpy.zeros', 'np.zeros', (['(self.k, 1)'], {}), '((self.k, 1))\n', (1179, 1192), True, 'import numpy as np\n'), ((1549, 1568), 'numpy.argmin', 'np.argmin', (['distance'], {}), '(distance)\n', (1558, 1568), True, 'import numpy as np\n'), ((816, 835), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (823, 835), True, 'import numpy as np\n')] |
"""
This module declares the Bayesian regression tree models:
* PerpendicularRegressionTree
* HyperplaneRegressionTree
"""
import numpy as np
from abc import ABC
from scipy.special import gammaln
from sklearn.base import RegressorMixin
from bayesian_decision_tree.base import BaseTree
from bayesian_decision_tree.base_hyperplane import BaseHyperplaneTree
from bayesian_decision_tree.base_perpendicular import BasePerpendicularTree
class BaseRegressionTree(BaseTree, ABC, RegressorMixin):
    """
    Abstract base class of all Bayesian regression trees (perpendicular and hyperplane). Performs
    medium-level fitting and prediction tasks and outsources the low-level work to subclasses.

    The leaf model is a Normal-gamma conjugate prior over an unknown mean and
    unknown variance; see https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf.
    """

    def __init__(self, partition_prior, prior, delta, prune, child_type, split_precision, level=0):
        # the hard-coded True is forwarded positionally to BaseTree.__init__
        BaseTree.__init__(self, partition_prior, prior, delta, prune, child_type, True, split_precision, level)

    def _check_target(self, y):
        """Validate that the regression target is a 1-dimensional array."""
        if y.ndim != 1:
            raise ValueError('y should have 1 dimension but has {}'.format(y.ndim))

    def _compute_log_p_data_no_split(self, y, prior):
        """Return log p(data, no-split): log prior of not splitting plus the
        log marginal likelihood of all of y under a single leaf model."""
        y_sum = y.sum()
        y_squared_sum = (y ** 2).sum()
        n = len(y)

        mu_post, kappa_post, alpha_post, beta_post = self._compute_posterior_internal(prior, n, y_sum, y_squared_sum)

        # probability of NOT splitting at this depth
        log_p_prior = np.log(1 - self.partition_prior**(1 + self.level))
        log_p_data = self._compute_log_p_data(prior, alpha_post, beta_post, kappa_post, n)

        return log_p_prior + log_p_data

    def _compute_log_p_data_split(self, y, prior, n_dim, split_indices):
        """Return, vectorized over all candidate split positions, the
        log prior of splitting plus the log marginal likelihoods of the
        left and right partitions of y (y is assumed sorted along the
        candidate dimension by the caller -- TODO confirm against BaseTree)."""
        n = len(y)
        n1 = np.arange(1, n)                 # left-partition sizes for every cut
        n2 = n - n1                          # right-partition sizes
        # cumulative sums give left/right sums for every cut in O(n)
        y_sum1 = y.cumsum()[:-1]
        y_sum2 = y.sum() - y_sum1
        y_squared_sum1 = (y[:-1] ** 2).cumsum()
        y_squared_sum2 = (y ** 2).sum() - y_squared_sum1
        if len(split_indices) != len(y)-1:
            # we are *not* splitting between all data points -> indexing necessary
            split_indices_minus_1 = split_indices - 1
            n1 = n1[split_indices_minus_1]
            n2 = n2[split_indices_minus_1]
            y_sum1 = y_sum1[split_indices_minus_1]
            y_sum2 = y_sum2[split_indices_minus_1]
            y_squared_sum1 = y_squared_sum1[split_indices_minus_1]
            y_squared_sum2 = y_squared_sum2[split_indices_minus_1]

        mu1, kappa1, alpha1, beta1 = self._compute_posterior_internal(prior, n1, y_sum1, y_squared_sum1)
        mu2, kappa2, alpha2, beta2 = self._compute_posterior_internal(prior, n2, y_sum2, y_squared_sum2)

        n_splits = len(split_indices)
        # split prior is shared uniformly across all candidate cuts and dimensions
        log_p_prior = np.log(self.partition_prior**(1+self.level) / (n_splits * n_dim))
        log_p_data1 = self._compute_log_p_data(prior, alpha1, beta1, kappa1, n1)
        log_p_data2 = self._compute_log_p_data(prior, alpha2, beta2, kappa2, n2)

        return log_p_prior + log_p_data1 + log_p_data2

    def _get_prior(self, n_data, n_dim):
        """Return the user-supplied prior, or a default Normal-gamma prior
        whose pseudo-observation count scales with the data size."""
        if self.prior is not None:
            return self.prior
        else:
            # TODO: use actual data to compute mu and tau
            prior_pseudo_observation_count = max(1, n_data//100)
            mu = 0
            tau = 1
            kappa = prior_pseudo_observation_count
            alpha = prior_pseudo_observation_count/2
            beta = alpha/tau
            return np.array([mu, kappa, alpha, beta])

    def _compute_posterior(self, y, prior, delta=1):
        """Return the Normal-gamma posterior [mu, kappa, alpha, beta] given y.

        delta=0 short-circuits to the prior (no data weight)."""
        if delta == 0:
            return prior

        n = len(y)
        y_sum = y.sum()
        y_squared_sum = (y ** 2).sum()

        return self._compute_posterior_internal(prior, n, y_sum, y_squared_sum, delta)

    def _compute_posterior_internal(self, prior, n, y_sum, y_squared_sum, delta=1):
        """Normal-gamma posterior update from sufficient statistics
        (n, sum(y), sum(y^2)); delta down-weights the data contribution."""
        mu, kappa, alpha, beta = prior

        # see https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf, equations (86) - (89)
        n_delta = n*delta
        kappa_post = kappa + n_delta
        mu_post = (kappa * mu + n_delta * y_sum / n) / kappa_post
        alpha_post = alpha + 0.5*n_delta
        beta_post = beta + 0.5 * delta * (y_squared_sum - y_sum ** 2 / n) + 0.5 * kappa * n_delta * (
                y_sum / n - mu) ** 2 / (kappa + n)

        return mu_post, kappa_post, alpha_post, beta_post

    def _compute_posterior_mean(self):
        return self.posterior_[0]  # mu is the posterior mean

    def _compute_log_p_data(self, prior, alpha_new, beta_new, kappa_new, n_new):
        """Log marginal likelihood of n_new observations given the prior and
        the updated posterior hyperparameters."""
        mu, kappa, alpha, beta = prior

        # see https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf, equation (95)
        return (gammaln(alpha_new) - gammaln(alpha)
                + alpha*np.log(beta) - alpha_new*np.log(beta_new)
                + 0.5*np.log(kappa/kappa_new)
                - 0.5*n_new*np.log(2*np.pi))

    def _predict_leaf(self):
        # predict posterior mean
        return self._compute_posterior_mean()

    def _get_raw_leaf_data_internal(self):
        # prior and posterior raw data
        return np.array([self.prior, self.posterior_])
class PerpendicularRegressionTree(BasePerpendicularTree, BaseRegressionTree):
    """Bayesian regression tree with axis-aligned ("perpendicular") splits.

    The leaf model uses a Normal-gamma(mu, kappa, alpha, beta) prior, i.e.
    both the mean and the variance of the target are treated as unknown.

    Parameters
    ----------
    partition_prior : float, must be > 0.0 and < 1.0, typical value: 0.9
        Prior probability of splitting a node's data into two children.
        Lower values give shallower trees (less expressive but less prone to
        overfitting); higher values give deeper trees that fit the data more
        closely, at the risk of overfitting.

    prior : array_like, shape = [4]
        Hyperparameters [mu, kappa, alpha, beta] of the Normal-gamma
        distribution (see [1], [2], [3]):
        - mu:    prior pseudo-observation sample mean
        - kappa: prior pseudo-observation count used to compute mu
        - alpha: (prior pseudo-observation count used to compute sample variance)/2
        - beta:  alpha * (prior pseudo-observation sample variance)
        These are usually easier to derive from more intuitive base
        quantities; see the examples section.

    delta : float, default=0.0
        Strengthening of the prior as the tree grows deeper, see [1].
        Must lie in [0.0, 1.0].

    split_precision : float, default=0.0
        Minimum distance between two contiguous points for a split to be
        considered; closer points are treated as overlapping along that
        direction.

    level : DO NOT SET, ONLY USED BY SUBCLASSES

    See also
    --------
    demo_regression_perpendicular.py
    PerpendicularClassificationTree
    HyperplaneRegressionTree

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Normal-gamma_distribution
    .. [2] https://en.wikipedia.org/wiki/Normal-gamma_distribution#Interpretation_of_parameters
    .. [3] https://en.wikipedia.org/wiki/Conjugate_prior#Continuous_distributions

    Examples
    --------
    A convenient way to choose the prior hyperparameters:

    >>> # prior mean; set to the mean of the target
    >>> mu = ...
    >>>
    >>> # prior standard deviation; about 0.1 times the std of the target
    >>> sd_prior = ...
    >>>
    >>> # prior pseudo-observations; roughly 1 - 10 % of the training samples
    >>> prior_pseudo_observations = ...
    >>>
    >>> kappa = prior_pseudo_observations
    >>> alpha = prior_pseudo_observations/2
    >>> beta = alpha*sd_prior**2
    >>> prior = [mu, kappa, alpha, beta]

    See `demo_regression_perpendicular.py`.
    """

    def __init__(self, partition_prior=0.99, prior=None, delta=0, prune=False, split_precision=0.0, level=0):
        # children of a node are trees of this same class
        BasePerpendicularTree.__init__(
            self, partition_prior, prior, delta, prune,
            PerpendicularRegressionTree, True, split_precision, level)
        BaseRegressionTree.__init__(
            self, partition_prior, prior, delta, prune,
            PerpendicularRegressionTree, split_precision, level)
class HyperplaneRegressionTree(BaseHyperplaneTree, BaseRegressionTree):
    """Bayesian regression tree using arbitrarily-oriented hyperplane splits.

    The leaf model uses a Normal-gamma(mu, kappa, alpha, beta) prior, i.e.
    both the mean and the variance of the target are treated as unknown.

    Parameters
    ----------
    partition_prior : float, must be > 0.0 and < 1.0, typical value: 0.9
        Prior probability of splitting a node's data into two children.
        Lower values give shallower trees (less expressive but less prone to
        overfitting); higher values give deeper trees that fit the data more
        closely, at the risk of overfitting.

    prior : array_like, shape = [4]
        Hyperparameters [mu, kappa, alpha, beta] of the Normal-gamma
        distribution (see [1], [2], [3]):
        - mu:    prior pseudo-observation sample mean
        - kappa: prior pseudo-observation count used to compute mu
        - alpha: (prior pseudo-observation count used to compute sample variance)/2
        - beta:  alpha * (prior pseudo-observation sample variance)
        These are usually easier to derive from more intuitive base
        quantities; see PerpendicularRegressionTree's examples section.

    delta : float, default=0.0
        Strengthening of the prior as the tree grows deeper, see [1].
        Must lie in [0.0, 1.0].

    optimizer : object
        Global optimization algorithm used to search for the optimal
        hyperplane orientation. Available options, in the order you should
        try them:
        - ScipyOptimizer: wrapper around scipy global optimizers; see usages
          for examples.
        - SimulatedAnnealingOptimizer: experimental, works well with
          n_scan=20, n_keep=10, spread_factor=0.95
        - RandomHyperplaneOptimizer: experimental, mediocre performance
        - RandomTwoPointOptimizer: experimental, mediocre performance
        - GradientDescentOptimizer: experimental, mediocre performance

    split_precision : float, default=0.0
        Minimum distance between two contiguous points for a split to be
        considered; closer points are treated as overlapping along that
        direction.

    level : DO NOT SET, ONLY USED BY SUBCLASSES

    See also
    --------
    demo_regression_hyperplane.py
    HyperplaneClassificationTree
    PerpendicularRegressionTree

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Normal-gamma_distribution
    .. [2] https://en.wikipedia.org/wiki/Normal-gamma_distribution#Interpretation_of_parameters
    .. [3] https://en.wikipedia.org/wiki/Conjugate_prior#Continuous_distributions

    Examples
    --------
    Compute the prior hyperparameters as in the perpendicular case; see
    PerpendicularRegressionTree and `demo_regression_hyperplane.py`.
    """

    def __init__(self, partition_prior=0.99, prior=None, delta=0, prune=False, optimizer=None, split_precision=0.0, level=0):
        # children of a node are trees of this same class
        BaseHyperplaneTree.__init__(
            self, partition_prior, prior, delta, prune,
            HyperplaneRegressionTree, True, optimizer, split_precision, level)
        BaseRegressionTree.__init__(
            self, partition_prior, prior, delta, prune,
            HyperplaneRegressionTree, split_precision, level)
| [
"numpy.log",
"bayesian_decision_tree.base_perpendicular.BasePerpendicularTree.__init__",
"bayesian_decision_tree.base.BaseTree.__init__",
"bayesian_decision_tree.base_hyperplane.BaseHyperplaneTree.__init__",
"numpy.array",
"numpy.arange",
"scipy.special.gammaln"
] | [((809, 917), 'bayesian_decision_tree.base.BaseTree.__init__', 'BaseTree.__init__', (['self', 'partition_prior', 'prior', 'delta', 'prune', 'child_type', '(True)', 'split_precision', 'level'], {}), '(self, partition_prior, prior, delta, prune, child_type, \n True, split_precision, level)\n', (826, 917), False, 'from bayesian_decision_tree.base import BaseTree\n'), ((1331, 1383), 'numpy.log', 'np.log', (['(1 - self.partition_prior ** (1 + self.level))'], {}), '(1 - self.partition_prior ** (1 + self.level))\n', (1337, 1383), True, 'import numpy as np\n'), ((1620, 1635), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (1629, 1635), True, 'import numpy as np\n'), ((2604, 2673), 'numpy.log', 'np.log', (['(self.partition_prior ** (1 + self.level) / (n_splits * n_dim))'], {}), '(self.partition_prior ** (1 + self.level) / (n_splits * n_dim))\n', (2610, 2673), True, 'import numpy as np\n'), ((4955, 4994), 'numpy.array', 'np.array', (['[self.prior, self.posterior_]'], {}), '([self.prior, self.posterior_])\n', (4963, 4994), True, 'import numpy as np\n'), ((8006, 8126), 'bayesian_decision_tree.base_perpendicular.BasePerpendicularTree.__init__', 'BasePerpendicularTree.__init__', (['self', 'partition_prior', 'prior', 'delta', 'prune', 'child_type', '(True)', 'split_precision', 'level'], {}), '(self, partition_prior, prior, delta, prune,\n child_type, True, split_precision, level)\n', (8036, 8126), False, 'from bayesian_decision_tree.base_perpendicular import BasePerpendicularTree\n'), ((11409, 11537), 'bayesian_decision_tree.base_hyperplane.BaseHyperplaneTree.__init__', 'BaseHyperplaneTree.__init__', (['self', 'partition_prior', 'prior', 'delta', 'prune', 'child_type', '(True)', 'optimizer', 'split_precision', 'level'], {}), '(self, partition_prior, prior, delta, prune,\n child_type, True, optimizer, split_precision, level)\n', (11436, 11537), False, 'from bayesian_decision_tree.base_hyperplane import BaseHyperplaneTree\n'), ((3324, 3358), 'numpy.array', 
'np.array', (['[mu, kappa, alpha, beta]'], {}), '([mu, kappa, alpha, beta])\n', (3332, 3358), True, 'import numpy as np\n'), ((4731, 4748), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4737, 4748), True, 'import numpy as np\n'), ((4679, 4704), 'numpy.log', 'np.log', (['(kappa / kappa_new)'], {}), '(kappa / kappa_new)\n', (4685, 4704), True, 'import numpy as np\n'), ((4640, 4656), 'numpy.log', 'np.log', (['beta_new'], {}), '(beta_new)\n', (4646, 4656), True, 'import numpy as np\n'), ((4555, 4573), 'scipy.special.gammaln', 'gammaln', (['alpha_new'], {}), '(alpha_new)\n', (4562, 4573), False, 'from scipy.special import gammaln\n'), ((4576, 4590), 'scipy.special.gammaln', 'gammaln', (['alpha'], {}), '(alpha)\n', (4583, 4590), False, 'from scipy.special import gammaln\n'), ((4615, 4627), 'numpy.log', 'np.log', (['beta'], {}), '(beta)\n', (4621, 4627), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
from oneibl.one import ONE
from ibllib.io import spikeglx
import alf.io
from scipy.io import savemat
from brainbox.io.one import load_channel_locations
from scipy import signal
import brainbox as bb
from pathlib import Path
import pandas as pd
from numpy.random import randint
from brainbox.processing import bincount2D
import matplotlib.pyplot as plt
import ibllib.plots as iblplt
from collections import Counter
import os
plt.ion()
'''
This script includes functions to align lfp data,
plot time seires overlayed to DLC-detected licks
'''
# MAXIMUM 3
# ['c9fec76e-7a20-4da4-93ad-04510a89473b',
# 'probe01',
# ['hoferlab', 'Subjects', 'SWC_015', '2020-01-21', '002'],
# 3885],
#[['d33baf74-263c-4b37-a0d0-b79dcb80a764',
# 'probe00',
# ['mainenlab', 'Subjects', 'ZM_2240', '2020-01-21', '001'],
# 1229],
# ['a8a8af78-16de-4841-ab07-fde4b5281a03',
# 'probe01',
# ['angelakilab', 'Subjects', 'NYU-12', '2020-01-22', '001'],
# 448]
#eid_probe =
#[['a8a8af78-16de-4841-ab07-fde4b5281a03','probe01'],
#['c9fec76e-7a20-4da4-93ad-04510a89473b','probe01'],
#['d33baf74-263c-4b37-a0d0-b79dcb80a764','probe00']]
def check_for_saturation(eid, probes):
    """Detect putative saturation (artefact) segments in a session's spiking data.

    Spikes of each probe are binned into T_BIN-second bins across all clusters;
    bins whose mean activity over clusters falls below ACT_THR are flagged as
    saturated. Results are saved to disk and returned.

    Args:
        eid: session UUID string (Alyx experiment id).
        probes: iterable of probe labels, e.g. ['probe00', 'probe01'].

    Returns:
        list of ['<eid>_<probe>', times_of_saturated_bins] entries, one per
        probe with more than one saturated bin.
    """
    T_BIN = 0.2  # time bin in seconds
    # Fix: the printed threshold (0.05) disagreed with the hard-coded value the
    # comparison actually used (0.15). The constant is set to the value that was
    # applied before, so detection behavior is unchanged and the log is honest.
    ACT_THR = 0.15  # maximal mean activity for a bin to count as saturated
    print('Bin size: %s [s]' % T_BIN)  # fix: unit label said [ms] for a value in seconds
    print('Activity threshold: %s [fraction]' % ACT_THR)
    probeDict = {'probe00': 'probe_left', 'probe01': 'probe_right'}
    one = ONE()
    dataset_types = ['spikes.times', 'spikes.clusters']
    D = one.load(eid, dataset_types=dataset_types, dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent
    print(alf_path)
    l = []
    for probe in probes:
        probe_path = alf_path / probe
        if not probe_path.exists():
            # fall back to the legacy left/right probe naming scheme
            probe_path = alf_path / probeDict[probe]
            if not probe_path.exists():
                print("% s doesn't exist..." % probe)
                continue
        try:
            spikes = alf.io.load_object(probe_path, 'spikes')
        except Exception:  # narrowed from a bare except; skip unreadable probes
            continue
        # bin spikes over time, one row per cluster
        R, times, Clusters = bincount2D(
            spikes['times'], spikes['clusters'], T_BIN)
        saturated_bins = np.where(np.mean(R, axis=0) < ACT_THR)[0]
        if len(saturated_bins) > 1:  # a single quiet bin is not reported
            print('WARNING: Saturation present!')
            print(probe)
            print('Number of saturated bins: %s of %s' %
                  (len(saturated_bins), len(times)))
            l.append(['%s_%s' % (eid, probe), times[saturated_bins]])
    # NOTE(review): hard-coded output directory — consider parameterizing
    np.save('/home/mic/saturation_scan2/%s.npy' % eid, l)
    return l
def plot_saturation():
    """Plot, per session/probe, how many 200 ms spike bins were saturated.

    Reads every .npy result written by check_for_saturation from a fixed
    results folder, keeps sessions with more than 10 saturated segments and
    shows them as a horizontal bar chart.

    Returns:
        (seg_info, sess_info): saturated-segment counts and the matching
        '<eid>_<probe>' labels, both restricted to counts > 10.
    """
    plt.ion()
    results_folder = '/home/mic/saturation_scan/'
    filenames = list(os.walk(results_folder))[0][-1]
    sess_info = []
    sat_segs = []
    for fname in filenames:
        try:
            # Fix: these files hold object arrays (str + ndarray pairs), so
            # allow_pickle=True is required — consistent with the loader in
            # get_trials_from_times; without it every load raised and was
            # silently swallowed by the former bare except.
            a = np.load(results_folder + fname, allow_pickle=True)
        except Exception:  # narrowed from a bare except
            print("couldn't load %s" % fname)
            continue
        sess_info.append(a[:, 0])
        sat_segs.append(a[:, 1])
    flat_sess_info = [item for sublist in sess_info for item in sublist]
    flat_sat_segs = [int(item) for sublist in sat_segs for item in sublist]
    # keep only sessions with a substantial number of saturated segments
    maxes = np.where(np.array(flat_sat_segs) > 10)
    height = np.array(flat_sat_segs)[maxes]
    bars = np.array(flat_sess_info)[maxes]
    # (removed an unused `one = ONE()` that opened a network client for nothing)
    y_pos = np.arange(len(bars))
    # horizontal bars, one per session/probe label
    plt.barh(y_pos, height)
    plt.yticks(y_pos, bars, fontsize=10)
    plt.xlabel('number of saturated 200 ms segments')
    plt.title('sessions with histology that meet behavior criterion for the BWM')
    return height, bars
def get_info(seg_info, sess_info):
    """Resolve session metadata for each '<eid>_<probe>' label.

    Args:
        seg_info: saturated-segment counts, parallel to sess_info.
        sess_info: '<eid>_<probe>' label strings.

    Returns:
        list of [eid, probe, session-path components, segment count] entries.
    """
    one = ONE()
    out = []
    for seg_count, label in zip(seg_info, sess_info):
        eid, probe = label.split('_')
        D = one.load(eid, dataset_types=['trials.intervals'], dclass_output=True)
        # session path components (lab/Subjects/subject/date/number)
        path_parts = str(Path(D.local_path[0]).parent.parent).split('/')[5:]
        out.append([eid, probe, path_parts, seg_count])
    return out
def get_trials_from_times(eid):
    """Count saturation events per trial for one session.

    Loads the saturation times saved by check_for_saturation and the trial
    intervals, then counts how many saturation events fall inside each trial.

    Args:
        eid: session UUID string.

    Returns:
        collections.Counter mapping trial index -> number of saturation
        events inside that trial.
    """
    one = ONE()
    sat_times_path = '/home/mic/saturation_scan2/%s.npy' % eid
    sat_times_info = np.load(sat_times_path, allow_pickle=True)[0]
    sat_times = sat_times_info[1]
    D = one.load(eid, dataset_types=['trials.intervals'], dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'
    trials = alf.io.load_object(alf_path, '_ibl_trials')
    # one entry per (trial, saturation event) pair whose event lies inside the trial
    trials_with_sat = [idx
                       for idx, (start, stop) in enumerate(trials['intervals'])
                       for event in sat_times
                       if start < event < stop]
    counts = Counter(trials_with_sat)
    print(sat_times_info[0])
    print(len(counts), 'of', len(trials['intervals']), 'trials have at least one saturation event')
    return counts
def find_nearest(array, value):
    """Return the index of the element of *array* closest to *value*."""
    deltas = np.abs(np.asarray(array) - value)
    return deltas.argmin()
def align_data(eid, one, trial_idx, probe):
    """Load raw AP-band data for one trial and align it to session time.

    Args:
        eid: session UUID string.
        one: an instantiated ONE client.
        trial_idx: index of the trial whose interval is extracted.
        probe: probe label used to select the raw file, e.g. 'probe00'.

    Returns:
        (data, times_data): mean-subtracted raw samples covering the trial
        interval (samples x channels; the last channel is dropped) and their
        timestamps in session time (s).
    """
    # change .ap to .lf to get LFP instead of the high-frequency (AP) band
    # example sessions kept from the original author:
    # D['c607c5da-534e-4f30-97b3-b1d3e904e9fd']['probe01'] has Visp1, VISp2/3 and VISp4'
    # '3663d82b-f197-4e8b-b299-7b803a155b84', 'left', [8,8], lick example
    lf_paths = one.load(eid, dataset_types=[ 'ephysData.raw.meta',
                        'ephysData.raw.ch','ephysData.raw.sync',
                        'trials.intervals', 'trials.stimOn_times',
                        'trials.feedbackType','trials.goCue_times',
                        'trials.feedback_times','trials.contrastLeft',
                        'trials.contrastRight'],
                        download_only=True) #'ephysData.raw.ap',
    # pick the compressed raw AP file belonging to the requested probe
    lf_file = [x for x in lf_paths if probe in str(x) and 'ap.cbin' in str(x)][0]
    sr = spikeglx.Reader(lf_file)
    sync_file = sr.file_bin.parent.joinpath(sr.file_bin.stem.replace('.ap', '.sync.npy'))
    sync = np.load(sync_file)
    # spacing of sync points in column 0; presumably ~20 Hz sampling — TODO confirm
    fs_sync = int(np.mean(np.diff(sync[:, 0])))
    # interpolator from raw sample index (sync col 0 * fs) to session time (col 1)
    sample2time = scipy.interpolate.interp1d(sync[:, 0] * sr.fs, sync[:, 1])
    alf_path = [x for x in lf_paths if 'alf' in str(x)][0].parent
    trials = alf.io.load_object(alf_path, '_ibl_trials')
    # digitize the trial starts so the nearest-sample search below only scans
    # one small chunk of the recording instead of the whole file
    times_to_align_to = trials['intervals'][:,0]
    binids = np.digitize(times_to_align_to, sync[:,1])
    # trial start time (s) for the requested trial
    t = trials['intervals'][:,0][trial_idx]
    times = sample2time(np.arange((binids[trial_idx]-1) * fs_sync * sr.fs, binids[trial_idx] * fs_sync * sr.fs))
    lfp_index = find_nearest(times, t)
    startx = int(lfp_index + (binids[trial_idx]-1) * fs_sync * sr.fs) # absolute start sample index
    print(startx)
    t_end = trials['intervals'][:,1][trial_idx]
    # [start, stop) sample indices covering the trial interval
    data_bounds = [int(startx), int((t_end - t) * sr.fs) + int(startx)]
    print(data_bounds)
    data = sr[data_bounds[0]:data_bounds[1], :-1]  # drop the last (sync) channel — TODO confirm
    times_data = sample2time(np.arange(data_bounds[0],data_bounds[1]))
    data = data - np.mean(data)  # remove the global DC offset
    return data, times_data
def get_ap_partial(eid, one, t_start, probe):
    """Download a short (seg_length s) chunk of raw AP data starting near t_start.

    Args:
        eid: session UUID string.
        one: an instantiated ONE client.
        t_start: start time in session seconds.
        probe: probe label, 'probe00' or 'probe01'.

    Returns:
        (ap_chunk, times_data): the partial raw download and timestamps (s)
        for the covered sample range.
    """
    # example: hofer sw 43: '7cdb71fb-928d-4eea-988f-0b655081f21c'
    seg_length = 3 # data segment length in seconds, starting at t_start
    fs = 30000  # AP band sampling rate — presumably fixed for this rig; TODO confirm
    D=one.load(eid, dataset_types=['ephysData.raw.meta','ephysData.raw.sync'], dclass_output=True)
    meta_file = [x for x in D.local_path if probe in str(x) and 'ap' in str(x)][0]
    sync_file = meta_file.parent.joinpath(meta_file.stem.replace('.ap', '.sync.npy'))
    sync = np.load(sync_file)
    # spacing of sync points in column 0 — TODO confirm units
    fs_sync = int(np.mean(np.diff(sync[:, 0])))
    # interpolator from raw sample index to session time (s)
    sample2time = scipy.interpolate.interp1d(sync[:, 0] * fs, sync[:, 1])
    # digitize so the nearest-sample search only scans one sync chunk
    binids = np.digitize(sync[:, 0], sync[:,1])
    block_idx = np.where(sync[:, 0]>t_start)[0][0]
    # timestamps of the chunk containing t_start
    times = sample2time(np.arange((binids[block_idx]-1) * fs_sync * fs, binids[block_idx] * fs_sync * fs))
    lfp_index = find_nearest(times, t_start)
    startx = int(lfp_index + (binids[block_idx]-1) * fs_sync * fs)  # absolute start sample
    #t_end = trials['intervals'][:,1][trial_idx]
    t_end = t_start + seg_length # segment length in seconds
    data_bounds = [int(startx), int((t_end - t_start) * fs) + int(startx)] # sample index bounds
    # the raw ap data is served in fs-sample (1 s) chunks;
    # make sure t_start is not at the limits of the recording
    start_chunk = data_bounds[0] // fs
    end_chunk = start_chunk + seg_length
    pdict = {'probe00': 0,'probe01': 1}  # assumes dsets are ordered by probe — TODO confirm
    probe_idx = pdict[probe]
    dsets = one.alyx.rest(
        'datasets', 'list', session=eid,
        django='name__icontains,ap.cbin,collection__endswith,%s' %probe)
    # NOTE(review): keeps the LAST file record with a data_url; if none has
    # one, url_cbin is unbound and the download below raises NameError
    for fr in dsets[probe_idx]['file_records']:
        if fr['data_url']:
            url_cbin = fr['data_url']
    dsets = one.alyx.rest(
        'datasets', 'list', session=eid,
        django='name__icontains,ap.ch,collection__endswith,%s' %probe)
    for fr in dsets[probe_idx]['file_records']:
        if fr['data_url']:
            url_ch = fr['data_url']
    ap_chunk = one.download_raw_partial(url_cbin, url_ch, start_chunk, end_chunk -1)
    print(url_cbin,url_ch)
    times_data = sample2time(np.arange(start_chunk * fs, end_chunk * fs))
    return ap_chunk, times_data
def plot_ap(data, times_data):
    """Plot a subset of AP-band channels as vertically offset traces.

    Args:
        data: 2D array (samples x channels) of raw AP voltages.
        times_data: 1D array of timestamps (s), one per sample.
    """
    plt.ion()
    obs, chans = data.shape
    fig, ax = plt.subplots()
    for i in range(0, chans, 30):  # every 30th channel keeps the figure readable
        trace = data[:, i] - np.mean(data[:, i])  # center each trace on zero
        plt.plot(times_data, trace + i * 2)  # offset by channel index to stack traces
    plt.title('ap_signal')
    plt.xlabel('time [s]')
    ax.set_yticklabels([])
    # fix: label previously said 'every 15th channel' but the loop steps by 30
    plt.ylabel('every 30th channel')
def plot_all(eid, trial_idx, probe):
    """Run the QC plots (RMS and LFP power spectrum) for one probe of a session.

    Args:
        eid: session UUID string.
        trial_idx: currently unused; kept for backward compatibility with the
            commented-out alignment/raster plots below.
        probe: probe label, e.g. 'probe00'.
    """
    # removed an unused `one = ONE()` — each plotting helper creates its own client
    #data, times_data = align_data(eid, one, trial_idx, probe)
    #plot_ap(data, times_data)
    #plot_raster_single_trial(one, eid, trial_idx, probe)
    plot_rms(eid, probe)
    plot_power_spectrum_lfp(eid, probe)
def plot_raster_single_trial(one, eid, trial_number, probe):
    '''
    Plot a rasterplot for a given trial,
    ordered by insertion depth, with
    'stimOn_times','feedback_times' and 'stimOff_times'

    Args:
        one: an instantiated ONE client.
        eid: session UUID string.
        trial_number: index of the trial to display.
        probe: probe label, e.g. 'probe00'.
    '''
    dataset_types = ['clusters.depth','spikes.times', 'spikes.depths','spikes.clusters', 'trials.intervals']
    D = one.load(eid, dataset_types = dataset_types, dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'
    # some sessions already resolve to .../alf; avoid doubling the folder
    if str(alf_path.parent)[-3:] == 'alf':
        alf_path = alf_path.parent
    probe_path = alf_path / probe
    spikes = alf.io.load_object(probe_path, 'spikes')
    trials = alf.io.load_object(alf_path, 'trials')
    T_BIN = 0.01  # time bin in sec
    # bin spikes: R is (clusters x time bins)
    R, times, Clusters = bincount2D(
        spikes['times'], spikes['clusters'], T_BIN)
    # Order activity by cortical depth of neurons
    d = dict(zip(spikes['clusters'], spikes['depths']))
    y = sorted([[i, d[i]] for i in d])
    isort = np.argsort([x[1] for x in y])
    R = R[isort, :]
    # get trial number for each time bin
    trial_numbers = np.digitize(times, trials['intervals'][:,0])
    print('Range of trials: ', [trial_numbers[0], trial_numbers[-1]])
    plt.figure('2')
    plt.title('%s_%s_trial: %s' %(eid, probe, trial_number))
    # np.digitize is 1-based: bin i means "after the start of trial i-1"
    trial_number = trial_number +1
    a = list(trial_numbers)
    # first and last time-bin index belonging to the requested trial
    first = a.index(trial_number)
    last = len(a) - 1 - a[::-1].index(trial_number)
    plt.imshow(R[:, first:last], aspect='auto',
               cmap='binary', vmax=T_BIN / 0.001 / 4,
               extent=np.r_[times[[first, last]],
                            Clusters[[0, -1]]], origin='lower')

    def restrict_timestamplist(q):
        # keep only timestamps that fall inside the displayed trial window
        li = []
        for i in q:
            if i > times[first] and i < times[last]:
                li.append(i)
        return li

    iblplt.vertical_lines(restrict_timestamplist(
        trials['stimOn_times']), ymin=0, ymax=Clusters[-1],
        color='m', linewidth=0.5, label='stimOn_times')

    iblplt.vertical_lines(restrict_timestamplist(
        trials['feedback_times']), ymin=0, ymax=Clusters[-1],
        color='b', linewidth=0.5, label='feedback_times')

    # iblplt.vertical_lines(restrict_timestamplist(
    #    trials['stimOff_times']), ymin=0, ymax=Clusters[-1],
    #    color='g', linewidth=0.5, label='stimOff_times')

    plt.xlabel('Time (s)')
    plt.ylabel('Cluster #; ordered by depth')
    plt.legend()
    plt.tight_layout()
    plt.show()
def plot_rms(eid, probe_label):
    """Plot AP-band and LFP-band RMS maps (time x depth) for one probe.

    Downloads the precomputed RMS QC datasets via ONE, converts them to uV
    (dB for the AP band), draws both as images and saves the figure to a
    hard-coded output directory.

    Args:
        eid: session UUID string.
        probe_label: probe label, e.g. 'probe00'.
    """
    # https://int-brain-lab.github.io/iblenv/notebooks_external/docs_get_rms_data.html
    plt.ion()

    # instantiate ONE
    one = ONE()

    # Specify subject, date and probe we are interested in
    # subject = 'CSHL049'
    # date = '2020-01-08'
    # sess_no = 1
    # probe_label = 'probe00'
    # eid = one.search(subject=subject, date=date, number=sess_no)[0]

    # Specify the dataset types of interest
    dtypes = ['_iblqc_ephysTimeRms.rms',
              '_iblqc_ephysTimeRms.timestamps',
              'channels.rawInd',
              'channels.localCoordinates']

    # Download the data and get paths to downloaded data
    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data', probe_label)
    alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)

    session_name = '_'.join(str(ephys_path).split('/')[5:10])

    # Index of good recording channels along probe
    chn_inds = np.load(alf_path.joinpath('channels.rawInd.npy'))
    # Position of each recording channel along probe
    chn_pos = np.load(alf_path.joinpath('channels.localCoordinates.npy'))
    # Get range for y-axis
    depth_range = [np.min(chn_pos[:, 1]), np.max(chn_pos[:, 1])]

    # RMS data associated with AP band of data
    rms_ap = alf.io.load_object(ephys_path, 'ephysTimeRmsAP', namespace='iblqc')
    rms_ap_data = 20* np.log10(rms_ap['rms'][:, chn_inds] * 1e6) # convert V -> uV, then to dB

    # # Median subtract to clean up the data
    # median = np.mean(np.apply_along_axis(lambda x: np.median(x), 1, rms_ap_data))
    # # Add back the median so that the actual values in uV remain correct
    # rms_ap_data_median = np.apply_along_axis(lambda x: x - np.median(x), 1, rms_ap_data) + median

    # Colour-bar levels; currently unused (fixed vmin/vmax below), kept for
    # the commented-out auto-scaled variants
    ap_levels = np.quantile(rms_ap_data, [0.1, 0.9])
    ap_time_range = [rms_ap['timestamps'][0], rms_ap['timestamps'][-1]]

    # RMS data associated with LFP band of data
    rms_lf = alf.io.load_object(ephys_path, 'ephysTimeRmsLF', namespace='iblqc')
    rms_lf_data = rms_lf['rms'][:, chn_inds] * 1e6  # convert to uV
    # Median subtract to clean up the data
    # median = np.mean(np.apply_along_axis(lambda x: np.median(x), 1, rms_lf_data))
    # rms_lf_data_median = np.apply_along_axis(lambda x: x - np.median(x), 1, rms_lf_data) + median

    # levels currently unused (fixed vmin/vmax below)
    lf_levels = np.quantile(rms_lf_data, [0.1, 0.9])
    lf_time_range = [rms_lf['timestamps'][0], rms_lf['timestamps'][-1]]

    # Create figure
    fig, ax = plt.subplots(2, 1, figsize=(6, 8))
    # Plot the AP rms data
    ax0 = ax[0]
    # rms_ap_plot = ax0.imshow(rms_ap_data.T, extent=np.r_[ap_time_range, depth_range],
    #                          cmap='plasma', vmin=ap_levels[0], vmax=ap_levels[1], origin='lower')
    rms_ap_plot = ax0.imshow(rms_ap_data.T, extent=np.r_[ap_time_range, depth_range],
                             cmap='plasma', vmin=0, vmax=100, origin='lower')
    cbar_ap = fig.colorbar(rms_ap_plot, ax=ax0)
    cbar_ap.set_label('AP RMS (uV)')
    ax0.set_xlabel('Time (s)')
    ax0.set_ylabel('Depth along probe (um)')
    ax0.set_title('RMS of AP band')

    # Plot the LFP rms data
    ax1 = ax[1]
    # rms_lf_plot = ax1.imshow(rms_lf_data.T, extent=np.r_[lf_time_range, depth_range],
    #                          cmap='inferno', vmin=lf_levels[0], vmax=lf_levels[1], origin='lower')
    rms_lf_plot = ax1.imshow(rms_lf_data.T, extent=np.r_[lf_time_range, depth_range],
                             cmap='inferno', vmin=0, vmax=1500, origin='lower')
    cbar_lf = fig.colorbar(rms_lf_plot, ax=ax1)
    cbar_lf.set_label('LFP RMS (uV)')
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Depth along probe (um)')
    ax1.set_title('RMS of LFP band')

    plt.suptitle('%s_%s \n %s' %(eid, probe_label, session_name))
    # NOTE(review): hard-coded output directory — consider parameterizing
    plt.savefig('/home/mic/saturation_analysis/rms_plots/%s_%s.png' %(eid, probe_label))
    plt.show()
def plot_power_spectrum_lfp(eid, probe_label):
    """
    Plot the LFP power spectral density against depth along the probe for one
    session/probe and save the figure as a png.

    eid -- session identifier resolved via ONE
    probe_label -- probe name, e.g. 'probe00'
    """
    # instantiate ONE
    one = ONE()
    # # Specify subject, date and probe we are interested in
    # subject = 'CSHL049'
    # date = '2020-01-08'
    # sess_no = 1
    # probe_label = 'probe00'
    # eid = one.search(subject=subject, date=date, number=sess_no)[0]
    # Specify the dataset types of interest
    dtypes = ['_iblqc_ephysSpectralDensity.freqs',
              '_iblqc_ephysSpectralDensity.power',
              'channels.rawInd',
              'channels.localCoordinates']
    # Download the data and get paths to downloaded data
    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data', probe_label)
    alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)
    # Index of good recording channels along probe
    chn_inds = np.load(alf_path.joinpath('channels.rawInd.npy'))
    # Position of each recording channel along probe
    chn_pos = np.load(alf_path.joinpath('channels.localCoordinates.npy'))
    # Get range for y-axis
    depth_range = [np.min(chn_pos[:, 1]), np.max(chn_pos[:, 1])]
    # Load in power spectrum data
    lfp_spectrum = alf.io.load_object(ephys_path, 'ephysSpectralDensityLF', namespace='iblqc')
    lfp_freq = lfp_spectrum['freqs']
    lfp_power = lfp_spectrum['power'][:, chn_inds]
    # Define a frequency range of interest
    freq_range = [0, 300]
    freq_idx = np.where((lfp_freq >= freq_range[0]) &
                        (lfp_freq < freq_range[1]))[0]
    # Limit data to freq range of interest and also convert to dB
    # NOTE(review): np.log is the natural log; dB conversion is usually
    # 10 * np.log10(power) -- confirm whether natural log is intended here
    lfp_spectrum_data = 10 * np.log(lfp_power[freq_idx, :])
    dB_levels = np.quantile(lfp_spectrum_data, [0.1, 0.9])
    # Create figure
    fig, ax = plt.subplots()
    # Plot the LFP spectral data
    spectrum_plot = ax.imshow(lfp_spectrum_data.T, extent=np.r_[freq_range, depth_range],
                              cmap='viridis', vmin=dB_levels[0], vmax=dB_levels[1], origin='lower',
                              aspect='auto')
    cbar = fig.colorbar(spectrum_plot, ax=ax)
    cbar.set_label('LFP power (dB)')
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Depth along probe (um)')
    #ax.set_title('Power Spectrum of LFP')
    # plt.show()
    # NOTE(review): splitting on '/' assumes a POSIX path; breaks on Windows
    session_name = '_'.join(str(ephys_path).split('/')[5:10])
    plt.suptitle('%s_%s \n %s' %(eid, probe_label, session_name))
    # output directory is hard-coded; figure is saved, not shown
    plt.savefig('/home/mic/saturation_analysis/PSD_plots/%s_%s.png' %(eid, probe_label))
| [
"matplotlib.pyplot.title",
"numpy.load",
"numpy.abs",
"matplotlib.pyplot.suptitle",
"os.walk",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"pathlib.Path",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"ibllib.io.spi... | [((503, 512), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (510, 512), True, 'import matplotlib.pyplot as plt\n'), ((1791, 1796), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (1794, 1796), False, 'from oneibl.one import ONE\n'), ((2900, 2953), 'numpy.save', 'np.save', (["('/home/mic/saturation_scan2/%s.npy' % eid)", 'l'], {}), "('/home/mic/saturation_scan2/%s.npy' % eid, l)\n", (2907, 2953), True, 'import numpy as np\n'), ((3102, 3111), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3109, 3111), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3814), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (3812, 3814), False, 'from oneibl.one import ONE\n'), ((3911, 3934), 'matplotlib.pyplot.barh', 'plt.barh', (['y_pos', 'height'], {}), '(y_pos, height)\n', (3919, 3934), True, 'import matplotlib.pyplot as plt\n'), ((3978, 4014), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_pos', 'bars'], {'fontsize': '(10)'}), '(y_pos, bars, fontsize=10)\n', (3988, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4021, 4070), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of saturated 200 ms segments"""'], {}), "('number of saturated 200 ms segments')\n", (4031, 4070), True, 'import matplotlib.pyplot as plt\n'), ((4075, 4152), 'matplotlib.pyplot.title', 'plt.title', (['"""sessions with histology that meet behavior criterion for the BWM"""'], {}), "('sessions with histology that meet behavior criterion for the BWM')\n", (4084, 4152), True, 'import matplotlib.pyplot as plt\n'), ((4298, 4303), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (4301, 4303), False, 'from oneibl.one import ONE\n'), ((4733, 4738), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (4736, 4738), False, 'from oneibl.one import ONE\n'), ((5343, 5367), 'collections.Counter', 'Counter', (['trials_with_sat'], {}), '(trials_with_sat)\n', (5350, 5367), False, 'from collections import Counter\n'), ((5564, 5581), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (5574, 5581), 
True, 'import numpy as np\n'), ((6633, 6657), 'ibllib.io.spikeglx.Reader', 'spikeglx.Reader', (['lf_file'], {}), '(lf_file)\n', (6648, 6657), False, 'from ibllib.io import spikeglx\n'), ((6768, 6786), 'numpy.load', 'np.load', (['sync_file'], {}), '(sync_file)\n', (6775, 6786), True, 'import numpy as np\n'), ((7213, 7255), 'numpy.digitize', 'np.digitize', (['times_to_align_to', 'sync[:, 1]'], {}), '(times_to_align_to, sync[:, 1])\n', (7224, 7255), True, 'import numpy as np\n'), ((8547, 8565), 'numpy.load', 'np.load', (['sync_file'], {}), '(sync_file)\n', (8554, 8565), True, 'import numpy as np\n'), ((8797, 8832), 'numpy.digitize', 'np.digitize', (['sync[:, 0]', 'sync[:, 1]'], {}), '(sync[:, 0], sync[:, 1])\n', (8808, 8832), True, 'import numpy as np\n'), ((10450, 10459), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (10457, 10459), True, 'import matplotlib.pyplot as plt\n'), ((10544, 10558), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10556, 10558), True, 'import matplotlib.pyplot as plt\n'), ((10719, 10741), 'matplotlib.pyplot.title', 'plt.title', (['"""ap_signal"""'], {}), "('ap_signal')\n", (10728, 10741), True, 'import matplotlib.pyplot as plt\n'), ((10746, 10768), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (10756, 10768), True, 'import matplotlib.pyplot as plt\n'), ((10805, 10837), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""every 15th channel"""'], {}), "('every 15th channel')\n", (10815, 10837), True, 'import matplotlib.pyplot as plt\n'), ((10885, 10890), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (10888, 10890), False, 'from oneibl.one import ONE\n'), ((11943, 11997), 'brainbox.processing.bincount2D', 'bincount2D', (["spikes['times']", "spikes['clusters']", 'T_BIN'], {}), "(spikes['times'], spikes['clusters'], T_BIN)\n", (11953, 11997), False, 'from brainbox.processing import bincount2D\n'), ((12165, 12194), 'numpy.argsort', 'np.argsort', (['[x[1] for x in y]'], {}), '([x[1] for x in 
y])\n', (12175, 12194), True, 'import numpy as np\n'), ((12277, 12322), 'numpy.digitize', 'np.digitize', (['times', "trials['intervals'][:, 0]"], {}), "(times, trials['intervals'][:, 0])\n", (12288, 12322), True, 'import numpy as np\n'), ((12401, 12416), 'matplotlib.pyplot.figure', 'plt.figure', (['"""2"""'], {}), "('2')\n", (12411, 12416), True, 'import matplotlib.pyplot as plt\n'), ((12424, 12481), 'matplotlib.pyplot.title', 'plt.title', (["('%s_%s_trial: %s' % (eid, probe, trial_number))"], {}), "('%s_%s_trial: %s' % (eid, probe, trial_number))\n", (12433, 12481), True, 'import matplotlib.pyplot as plt\n'), ((12640, 12802), 'matplotlib.pyplot.imshow', 'plt.imshow', (['R[:, first:last]'], {'aspect': '"""auto"""', 'cmap': '"""binary"""', 'vmax': '(T_BIN / 0.001 / 4)', 'extent': 'np.r_[times[[first, last]], Clusters[[0, -1]]]', 'origin': '"""lower"""'}), "(R[:, first:last], aspect='auto', cmap='binary', vmax=T_BIN / \n 0.001 / 4, extent=np.r_[times[[first, last]], Clusters[[0, -1]]],\n origin='lower')\n", (12650, 12802), True, 'import matplotlib.pyplot as plt\n'), ((13529, 13551), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (13539, 13551), True, 'import matplotlib.pyplot as plt\n'), ((13556, 13597), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cluster #; ordered by depth"""'], {}), "('Cluster #; ordered by depth')\n", (13566, 13597), True, 'import matplotlib.pyplot as plt\n'), ((13602, 13614), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13612, 13614), True, 'import matplotlib.pyplot as plt\n'), ((13619, 13637), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13635, 13637), True, 'import matplotlib.pyplot as plt\n'), ((13642, 13652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13650, 13652), True, 'import matplotlib.pyplot as plt\n'), ((13785, 13794), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (13792, 13794), True, 'import matplotlib.pyplot as plt\n'), 
((13833, 13838), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (13836, 13838), False, 'from oneibl.one import ONE\n'), ((15513, 15549), 'numpy.quantile', 'np.quantile', (['rms_ap_data', '[0.1, 0.9]'], {}), '(rms_ap_data, [0.1, 0.9])\n', (15524, 15549), True, 'import numpy as np\n'), ((16062, 16098), 'numpy.quantile', 'np.quantile', (['rms_lf_data', '[0.1, 0.9]'], {}), '(rms_lf_data, [0.1, 0.9])\n', (16073, 16098), True, 'import numpy as np\n'), ((16206, 16240), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(6, 8)'}), '(2, 1, figsize=(6, 8))\n', (16218, 16240), True, 'import matplotlib.pyplot as plt\n'), ((17497, 17559), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('%s_%s \\n %s' % (eid, probe_label, session_name))"], {}), "('%s_%s \\n %s' % (eid, probe_label, session_name))\n", (17509, 17559), True, 'import matplotlib.pyplot as plt\n'), ((17563, 17652), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/home/mic/saturation_analysis/rms_plots/%s_%s.png' % (eid, probe_label))"], {}), "('/home/mic/saturation_analysis/rms_plots/%s_%s.png' % (eid,\n probe_label))\n", (17574, 17652), True, 'import matplotlib.pyplot as plt\n'), ((17652, 17662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17660, 17662), True, 'import matplotlib.pyplot as plt\n'), ((17752, 17757), 'oneibl.one.ONE', 'ONE', ([], {}), '()\n', (17755, 17757), False, 'from oneibl.one import ONE\n'), ((19352, 19394), 'numpy.quantile', 'np.quantile', (['lfp_spectrum_data', '[0.1, 0.9]'], {}), '(lfp_spectrum_data, [0.1, 0.9])\n', (19363, 19394), True, 'import numpy as np\n'), ((19430, 19444), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19442, 19444), True, 'import matplotlib.pyplot as plt\n'), ((20003, 20065), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('%s_%s \\n %s' % (eid, probe_label, session_name))"], {}), "('%s_%s \\n %s' % (eid, probe_label, session_name))\n", (20015, 20065), True, 'import matplotlib.pyplot as plt\n'), ((20069, 20158), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["('/home/mic/saturation_analysis/PSD_plots/%s_%s.png' % (eid, probe_label))"], {}), "('/home/mic/saturation_analysis/PSD_plots/%s_%s.png' % (eid,\n probe_label))\n", (20080, 20158), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2495), 'brainbox.processing.bincount2D', 'bincount2D', (["spikes['times']", "spikes['clusters']", 'T_BIN'], {}), "(spikes['times'], spikes['clusters'], T_BIN)\n", (2451, 2495), False, 'from brainbox.processing import bincount2D\n'), ((3704, 3727), 'numpy.array', 'np.array', (['flat_sat_segs'], {}), '(flat_sat_segs)\n', (3712, 3727), True, 'import numpy as np\n'), ((3761, 3785), 'numpy.array', 'np.array', (['flat_sess_info'], {}), '(flat_sess_info)\n', (3769, 3785), True, 'import numpy as np\n'), ((4831, 4873), 'numpy.load', 'np.load', (['sat_times_path'], {'allow_pickle': '(True)'}), '(sat_times_path, allow_pickle=True)\n', (4838, 4873), True, 'import numpy as np\n'), ((7389, 7482), 'numpy.arange', 'np.arange', (['((binids[trial_idx] - 1) * fs_sync * sr.fs)', '(binids[trial_idx] * fs_sync * sr.fs)'], {}), '((binids[trial_idx] - 1) * fs_sync * sr.fs, binids[trial_idx] *\n fs_sync * sr.fs)\n', (7398, 7482), True, 'import numpy as np\n'), ((7892, 7933), 'numpy.arange', 'np.arange', (['data_bounds[0]', 'data_bounds[1]'], {}), '(data_bounds[0], data_bounds[1])\n', (7901, 7933), True, 'import numpy as np\n'), ((7952, 7965), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (7959, 7965), True, 'import numpy as np\n'), ((8961, 9048), 'numpy.arange', 'np.arange', (['((binids[block_idx] - 1) * fs_sync * fs)', '(binids[block_idx] * fs_sync * fs)'], {}), '((binids[block_idx] - 1) * fs_sync * fs, binids[block_idx] *\n fs_sync * fs)\n', (8970, 9048), True, 'import numpy as np\n'), ((10319, 10362), 'numpy.arange', 'np.arange', (['(start_chunk * fs)', '(end_chunk * fs)'], {}), '(start_chunk * fs, end_chunk * fs)\n', (10328, 10362), True, 'import numpy as np\n'), ((10675, 10710), 
'matplotlib.pyplot.plot', 'plt.plot', (['times_data', '(tplot + i * 2)'], {}), '(times_data, tplot + i * 2)\n', (10683, 10710), True, 'import matplotlib.pyplot as plt\n'), ((14895, 14916), 'numpy.min', 'np.min', (['chn_pos[:, 1]'], {}), '(chn_pos[:, 1])\n', (14901, 14916), True, 'import numpy as np\n'), ((14918, 14939), 'numpy.max', 'np.max', (['chn_pos[:, 1]'], {}), '(chn_pos[:, 1])\n', (14924, 14939), True, 'import numpy as np\n'), ((15092, 15140), 'numpy.log10', 'np.log10', (["(rms_ap['rms'][:, chn_inds] * 1000000.0)"], {}), "(rms_ap['rms'][:, chn_inds] * 1000000.0)\n", (15100, 15140), True, 'import numpy as np\n'), ((18766, 18787), 'numpy.min', 'np.min', (['chn_pos[:, 1]'], {}), '(chn_pos[:, 1])\n', (18772, 18787), True, 'import numpy as np\n'), ((18789, 18810), 'numpy.max', 'np.max', (['chn_pos[:, 1]'], {}), '(chn_pos[:, 1])\n', (18795, 18810), True, 'import numpy as np\n'), ((19115, 19181), 'numpy.where', 'np.where', (['((lfp_freq >= freq_range[0]) & (lfp_freq < freq_range[1]))'], {}), '((lfp_freq >= freq_range[0]) & (lfp_freq < freq_range[1]))\n', (19123, 19181), True, 'import numpy as np\n'), ((19305, 19335), 'numpy.log', 'np.log', (['lfp_power[freq_idx, :]'], {}), '(lfp_power[freq_idx, :])\n', (19311, 19335), True, 'import numpy as np\n'), ((1939, 1960), 'pathlib.Path', 'Path', (['D.local_path[0]'], {}), '(D.local_path[0])\n', (1943, 1960), False, 'from pathlib import Path\n'), ((3298, 3326), 'numpy.load', 'np.load', (['(results_folder + ii)'], {}), '(results_folder + ii)\n', (3305, 3326), True, 'import numpy as np\n'), ((3658, 3681), 'numpy.array', 'np.array', (['flat_sat_segs'], {}), '(flat_sat_segs)\n', (3666, 3681), True, 'import numpy as np\n'), ((5593, 5614), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (5599, 5614), True, 'import numpy as np\n'), ((6815, 6834), 'numpy.diff', 'np.diff', (['sync[:, 0]'], {}), '(sync[:, 0])\n', (6822, 6834), True, 'import numpy as np\n'), ((8594, 8613), 'numpy.diff', 'np.diff', (['sync[:, 
0]'], {}), '(sync[:, 0])\n', (8601, 8613), True, 'import numpy as np\n'), ((8849, 8879), 'numpy.where', 'np.where', (['(sync[:, 0] > t_start)'], {}), '(sync[:, 0] > t_start)\n', (8857, 8879), True, 'import numpy as np\n'), ((3178, 3201), 'os.walk', 'os.walk', (['results_folder'], {}), '(results_folder)\n', (3185, 3201), False, 'import os\n'), ((5009, 5030), 'pathlib.Path', 'Path', (['D.local_path[0]'], {}), '(D.local_path[0])\n', (5013, 5030), False, 'from pathlib import Path\n'), ((11591, 11612), 'pathlib.Path', 'Path', (['D.local_path[0]'], {}), '(D.local_path[0])\n', (11595, 11612), False, 'from pathlib import Path\n'), ((2544, 2562), 'numpy.mean', 'np.mean', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (2551, 2562), True, 'import numpy as np\n'), ((4525, 4546), 'pathlib.Path', 'Path', (['D.local_path[0]'], {}), '(D.local_path[0])\n', (4529, 4546), False, 'from pathlib import Path\n')] |
import pandas as pd
from sentinelsat import *
from collections import OrderedDict
from datetime import datetime,timedelta, date
import numpy as np
from rasterio.features import sieve
from Python.prep_raster import computeIndexStack,compute_index
from Python.mlc import *
from Python.pred_raster import dtc_pred_stack
from sklearn.cluster import DBSCAN
from glob import glob
def get_feat_layer_order(predictors):
    """
    Map predictor names to their 1-based layer positions.

    Returns a tuple (index_layers, band_layers): 1-based positions of the
    requested spectral indices and raw bands, kept in the order in which
    they occur in *predictors*. Unknown names are ignored.
    """
    band_names = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06',
                  'B07', 'B08', 'B8A', 'B09', 'B11', 'B12']
    index_names = ['NDVI', 'REP', 'FAI', 'GNDVI', 'NDVI_B8A', 'VB_FAH', 'SEI', 'SABI']
    index_layers = [index_names.index(f) + 1 for f in predictors if f in index_names]
    band_layers = [band_names.index(f) + 1 for f in predictors if f in band_names]
    return index_layers, band_layers
def retrieve_product(date_tiles_dict, api):
    """
    Query the Copernicus hub for Sentinel-2 products per tile and date.

    date_tiles_dict -- maps tile ids to lists of dates given as YYYYMMDD ints
    api -- sentinelsat API instance
    Returns the collected products as a dataframe (cloud coverage,
    sensing date, etc.).
    """
    products = OrderedDict()
    # Level-2A products only exist for sensing dates after December 2018
    level2_cutoff = datetime.strptime(str(20181201), '%Y%m%d').date()
    for tile, tile_dates in date_tiles_dict.items():
        for d in tile_dates:
            # renamed from 'date' to avoid shadowing datetime.date
            sensing_date = datetime.strptime(str(d), '%Y%m%d').date()
            # query a +5 day window so that a single scene is matched
            kw_query = {'platformname': 'Sentinel-2',
                        'filename': f'*_{tile}_*',
                        'date': (sensing_date, sensing_date + timedelta(days=5))}
            kw_query['producttype'] = 'S2MSI2A' if sensing_date > level2_cutoff else 'S2MSI1C'
            # retrieve IDs used to download the data and collect them
            products.update(api.query(**kw_query))
    # dataframe view of product information (cloud coverage, sensing date, etc.)
    return api.to_dataframe(products)
def semi_sv_pred(nd_array,mlc_model,dtc_model,rescale=True,mlc_thr=7.79,gndvi_thr=0.05,b02_thr=0.15,sieve_size=10):
    """
    Predict a class mask for a numpy ndarray (bands, rows, cols) by combining a
    trained DTC model, a trained MLC model and a simple GNDVI density slice;
    pixels are kept only where all three classifiers agree.

    mlc_thr --> threshold based on the chi square table (n=4)
    gndvi_thr --> threshold for GNDVI image
    b02_thr --> threshold used for creating a cloud mask based on B02
    sieve_size --> minimal sieve size to filter pixel clusters
    Returns a (1, rows, cols) uint8 array, or None when no pixel survives
    masking and sieving (implicit return).
    """
    # reflectance values are stored scaled by 10000 in Sentinel-2 products
    if rescale:nd_array = nd_array/10000
    # band positions assume the 12-band stack order used elsewhere in this
    # module (B05 at index 4, B11 at index 10, B02 at index 1)
    b5_b11_img = nd_array[[4,10],:,:]
    b2_img = nd_array[1,:,:]
    #DTC, MLC and GNDVI density slicing classifications (class 3 = target class)
    stack2pred_img = np.concatenate((computeIndexStack(nd_array,['NDVI','REP']),b5_b11_img))
    mlc_img = np.where(np.array([mlc_model.classify_raster_gx(stack2pred_img,threshold=mlc_thr)])==3,1,0)
    dtc_img = np.where(np.array([dtc_pred_stack(dtc_model,stack2pred_img)])==3,1,0)
    slice_img = np.array([np.where(compute_index(nd_array,'GNDVI')>=gndvi_thr,1,0)])
    # sum classification results; keep only pixels where every classifier votes 1
    arr_sum = np.sum([mlc_img,dtc_img,slice_img],axis=0)
    results = np.where(arr_sum==arr_sum.max(),1,0)
    # apply cloud mask (bright B02 pixels) and sieve filter to drop small clusters
    cloud_mask = np.where(b2_img>=b02_thr,1,0).astype(int)
    results_masked = np.where(cloud_mask!=1,results,0)
    results_sieved = np.array([sieve(results_masked[0],size=sieve_size)]).astype(np.uint8)
    if results_sieved.max()!=0:
        return results_sieved
def dbscan_cluster(gdf_pt_geom, min_dist_km):
    """
    Cluster point geometries with DBSCAN on great-circle (haversine) distance.

    gdf_pt_geom -- geopandas point geometry series (any projected CRS)
    min_dist_km -- neighbourhood radius in kilometres
    Returns an array of 1-based cluster labels, one per point.
    """
    # reproject to WGS84 and assemble (lat, lon) rows
    coords = np.column_stack((gdf_pt_geom.to_crs(4326).y.values,
                              gdf_pt_geom.to_crs(4326).x.values))
    # haversine metric expects radians; convert the km radius with the
    # mean earth radius (6371.0088 km)
    eps_rad = min_dist_km / 6371.0088
    clustering = DBSCAN(eps=eps_rad, min_samples=1, algorithm='ball_tree',
                        metric='haversine').fit(np.radians(coords))
    # shift labels so clusters are numbered from 1
    return clustering.labels_ + 1
def get_band_paths(safe_path):
    """
    Collect the jp2 image paths of the 12 Sentinel-2 bands inside a .SAFE folder.

    Returns the paths ordered as listed in *bands* (band name + native
    resolution). Note: glob is re-evaluated once per band to preserve that order.
    """
    bands = ['B01_60m','B02_10m','B03_10m','B04_10m','B05_20m','B06_20m',
             'B07_20m','B08_10m','B8A_20m','B09_60m','B11_20m','B12_20m']
    return [img for band in bands for img in glob(safe_path + "*/**/*.jp2", recursive=True) if band in img] | [
"numpy.radians",
"numpy.sum",
"Python.prep_raster.compute_index",
"Python.prep_raster.computeIndexStack",
"numpy.where",
"numpy.array",
"rasterio.features.sieve",
"datetime.timedelta",
"glob.glob",
"collections.OrderedDict",
"Python.pred_raster.dtc_pred_stack",
"sklearn.cluster.DBSCAN"
] | [((1019, 1032), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1030, 1032), False, 'from collections import OrderedDict\n'), ((3097, 3142), 'numpy.sum', 'np.sum', (['[mlc_img, dtc_img, slice_img]'], {'axis': '(0)'}), '([mlc_img, dtc_img, slice_img], axis=0)\n', (3103, 3142), True, 'import numpy as np\n'), ((3346, 3383), 'numpy.where', 'np.where', (['(cloud_mask != 1)', 'results', '(0)'], {}), '(cloud_mask != 1, results, 0)\n', (3354, 3383), True, 'import numpy as np\n'), ((3736, 3756), 'numpy.array', 'np.array', (['[lat, lon]'], {}), '([lat, lon])\n', (3744, 3756), True, 'import numpy as np\n'), ((3973, 3991), 'numpy.radians', 'np.radians', (['matrix'], {}), '(matrix)\n', (3983, 3991), True, 'import numpy as np\n'), ((2714, 2758), 'Python.prep_raster.computeIndexStack', 'computeIndexStack', (['nd_array', "['NDVI', 'REP']"], {}), "(nd_array, ['NDVI', 'REP'])\n", (2731, 2758), False, 'from Python.prep_raster import computeIndexStack, compute_index\n'), ((3283, 3316), 'numpy.where', 'np.where', (['(b2_img >= b02_thr)', '(1)', '(0)'], {}), '(b2_img >= b02_thr, 1, 0)\n', (3291, 3316), True, 'import numpy as np\n'), ((3891, 3968), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'epsilon', 'min_samples': '(1)', 'algorithm': '"""ball_tree"""', 'metric': '"""haversine"""'}), "(eps=epsilon, min_samples=1, algorithm='ball_tree', metric='haversine')\n", (3897, 3968), False, 'from sklearn.cluster import DBSCAN\n'), ((4284, 4330), 'glob.glob', 'glob', (["(safe_path + '*/**/*.jp2')"], {'recursive': '(True)'}), "(safe_path + '*/**/*.jp2', recursive=True)\n", (4288, 4330), False, 'from glob import glob\n'), ((2909, 2950), 'Python.pred_raster.dtc_pred_stack', 'dtc_pred_stack', (['dtc_model', 'stack2pred_img'], {}), '(dtc_model, stack2pred_img)\n', (2923, 2950), False, 'from Python.pred_raster import dtc_pred_stack\n'), ((2995, 3027), 'Python.prep_raster.compute_index', 'compute_index', (['nd_array', '"""GNDVI"""'], {}), "(nd_array, 'GNDVI')\n", (3008, 3027), 
False, 'from Python.prep_raster import computeIndexStack, compute_index\n'), ((3411, 3452), 'rasterio.features.sieve', 'sieve', (['results_masked[0]'], {'size': 'sieve_size'}), '(results_masked[0], size=sieve_size)\n', (3416, 3452), False, 'from rasterio.features import sieve\n'), ((1359, 1376), 'datetime.timedelta', 'timedelta', ([], {'days': '(5)'}), '(days=5)\n', (1368, 1376), False, 'from datetime import datetime, timedelta, date\n')] |
# -*- coding: utf-8 -*-
import pathlib # change to pathlib from Python 3.4 instead of os
import tifffile, cv2, datetime, pickle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader
import moviepy.editor as mpy
from moviepy.video.io.bindings import mplfig_to_npimage
class OHW():
"""
main class of OpenHeartWare
bundles analysis (algorithm + parameters + ROI + MVs)
--> in future: can be saved to reuse
"""
    def __init__(self):
        """Initialize an empty analysis: no video, no motion vectors, default config."""
        self.rawImageStack = None # array for raw imported imagestack
        #self.ROIImageStack = None # array for ROIs
        self.rawMVs = None # array for raw motion vectors (MVs)
        self.unitMVs = None # MVs in correct unit (microns)
        #self.MV_parameters = None # dict for MV parameters
        # bundle these parameters in dict?
        self.absMotions = None # absolute motions, either from MVs or from intensity
        self.mean_absMotions = None # for 1D-representation
        self.avg_absMotion = None # time averaged absolute motion
        self.avg_MotionX = None # time averaged x-motion
        self.avg_MotionY = None # time averaged y-motion
        self.max_avgMotion = None # maximum of time averaged motions
        self.timeindex = None # time index for 1D-representation
        self.PeakDetection = PeakDetection.PeakDetection() # class which detects + saves peaks
        self.video_loaded = False # tells state if video is connected to OHW object
        #self.exceptions = None
        #self.isROI_OHW = False
        self.config = helpfunctions.read_config() # load current config
        self.raw_videometa = {"inputpath":""} # raw info after importing # dict of video metadata: microns_per_px, fps, blackval, whiteval,
        self.set_default_videometa(self.raw_videometa)
        self.videometa = self.raw_videometa.copy()
        self.analysis_meta = {"date": datetime.datetime.now(), "version": self.config['UPDATE']['version'],
            "motion_calculated":False, "has_MVs": False, "results_folder":""}
        # init_kinplot_options is defined elsewhere in this class (not visible here)
        self.init_kinplot_options()
    def import_video(self, inputpath, *args, **kwargs):
        """
        Import a video from *inputpath* and reset the video-related state.

        Stores the raw image stack and metadata, fills default metadata
        values, derives the results folder and flags the video as loaded.
        """
        self.rawImageStack, self.raw_videometa = videoreader.import_video(inputpath)
        self.set_default_videometa(self.raw_videometa)
        # keep an untouched copy; videometa may be edited by the user later
        self.videometa = self.raw_videometa.copy()
        self.set_auto_results_folder()
        self.video_loaded = True
    def import_video_thread(self, inputpath):
        """Wrap import_video in a worker thread (with progress signal) and return the thread."""
        self.thread_import_video = helpfunctions.turn_function_into_thread(self.import_video, emit_progSignal = True, inputpath = inputpath)
        return self.thread_import_video
    def reload_video(self, *args, **kwargs):
        """
        Re-import the video from the previously stored inputpath.

        Used to restore the raw image stack (e.g. after loading a pickled
        analysis); existing metadata in self.videometa is kept.
        """
        inputpath = self.videometa["inputpath"]
        self.rawImageStack, self.raw_videometa = videoreader.import_video(inputpath)
        self.video_loaded = True
    def reload_video_thread(self):
        """Wrap reload_video in a worker thread (with progress signal) and return the thread."""
        self.thread_reload_video = helpfunctions.turn_function_into_thread(self.reload_video, emit_progSignal = True)
        return self.thread_reload_video
def set_default_videometa(self, videometa):
"""
gets default values from config if not found in input
rounds values to specified digits
"""
for key in ["fps","microns_per_px"]:
if key not in videometa.keys():
videometa[key] = self.config['DEFAULT VALUES'][key]
videometa["fps"] = round(float(videometa["fps"]), 1)# round fps to 1 digit
videometa["microns_per_px"] = round(float(videometa["microns_per_px"]), 4)
    def set_video(self, rawImageStack, videometa):
        '''
        Set video data + metadata directly instead of importing the whole video again.

        rawImageStack: frame array; videometa: metadata dict (fps,
        microns_per_px, inputpath, input_type, ...).
        '''
        self.rawImageStack, self.videometa = rawImageStack, videometa
        self.raw_videometa = self.videometa.copy() #raw_videometa not available anymore
        self.set_auto_results_folder()
        self.video_loaded = True
def set_auto_results_folder(self):
# set results folder
inputpath = self.videometa["inputpath"]
if self.videometa["input_type"] == 'videofile':
self.analysis_meta["results_folder"] = inputpath.parent / ("results_" + str(inputpath.stem) )
else:
self.analysis_meta["results_folder"] = inputpath.parent / "results"
self.analysis_meta["inputpath"] = inputpath
def set_analysisImageStack(self, px_longest = None, roi = None):
'''
specifies resolution + roi of video to be analyzed
px_longest: resolution of longest side
if px_longest = None: original resolution is used
roi: specify region of interest, coordinates of rectangular that was selected as ROI in unmodified coordinates
'''
self.analysis_meta.update({'px_longest': px_longest, 'roi': roi, "scalingfactor":1})
self.analysisImageStack = self.rawImageStack
# select roi
if roi != None:
self.analysisImageStack = self.analysisImageStack[:,int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])]
pass
# rescale input
# take original resoltuion if no other specified
if px_longest != None:
self.analysisImageStack, self.analysis_meta["scalingfactor"] = helpfunctions.scale_ImageStack(self.analysisImageStack, px_longest=px_longest)
self.analysis_meta.update({'shape': self.analysisImageStack.shape})
    def save_ohw(self):
        '''
        Saves the ohw analysis object (pickle) with all necessary info.

        Especially useful after a batch run: no need to recalculate MVs when
        filters / plotting parameters are changed. Silently does nothing when
        no file is loaded (empty results_folder).
        '''
        if self.analysis_meta["results_folder"] == "": # don't save when no file loaded
            return
        filename = str(self.analysis_meta["results_folder"]/'ohw_analysis.pickle')
        savedata = [self.analysis_meta, self.videometa, self.rawMVs, self.PeakDetection.Peaks]
        # keep saving minimal, everything should be reconstructed from these parameters...
        self.analysis_meta["results_folder"].mkdir(parents = True, exist_ok = True)
        with open(filename, 'wb') as writefile:
            pickle.dump(savedata, writefile, protocol=pickle.HIGHEST_PROTOCOL)
    def load_ohw(self, filename):
        """
        Loads an ohw analysis object saved previously and sets the
        corresponding variables; reinitializes motion to restore the state
        before saving. The raw image stack itself is NOT restored here
        (video_loaded is set to False).

        SECURITY NOTE: pickle.load executes arbitrary code from the file --
        only load analysis files from trusted sources.
        """
        # take care if sth will be overwritten?
        # best way to insert rawImageStack?
        with open(filename, 'rb') as loadfile:
            data = pickle.load(loadfile)
        self.analysis_meta, self.videometa, self.rawMVs, Peaks = data
        self.video_loaded = False
        self.init_motion()
        self.set_peaks(Peaks) #call after init_motion as this resets peaks
    def calculate_motion(self, method = 'BM', progressSignal = None, **parameters):
        """
        Calculates motion (either motionvectors MVs or absolute motion) of the
        analysis imagestack based on the specified method and parameters.

        allowed methods:
            -BM: blockmatch (the only method implemented so far)
            -GF: gunnar farnbäck (placeholder, not implemented)
            -LK: lucas-kanade (placeholder, not implemented)
            -MM: musclemotion (placeholder, not implemented)
        progressSignal: optional signal forwarded to the calculation for
            progress reporting.
        parameters: algorithm parameters, stored in analysis_meta['MV_parameters'].
        """
        # store parameters which will be used for the calculation of MVs
        self.analysis_meta.update({'Motion_method': method, 'MV_parameters': parameters})
        if method == 'BM':
            self.rawMVs = OFlowCalc.BM_stack(self.analysisImageStack,
                progressSignal = progressSignal, **parameters)
            # flag downstream code that MVs are now available
            self.analysis_meta["has_MVs"], self.analysis_meta["motion_calculated"] = True, True
        elif method == 'GF':
            pass
        elif method == 'LK':
            pass
        elif method == 'MM':
            # self.absMotions = ...
            pass
    def calculate_motion_thread(self, **parameters):
        """Wrap calculate_motion in a worker thread (with progress signal) and return the thread."""
        self.thread_calculate_motion = helpfunctions.turn_function_into_thread(
            self.calculate_motion, emit_progSignal=True, **parameters)
        return self.thread_calculate_motion
    def init_motion(self):
        '''
        Calculate 2D & 1D data representations after motion determination:
        unit-scaled MVs, absolute motions per frame, mean motion trace, quiver
        components and time-averaged motion; finally feed the 1D trace into
        PeakDetection.
        '''
        #distinguish between MVs and motion here
        scalingfactor, delay = self.analysis_meta["scalingfactor"], self.analysis_meta["MV_parameters"]["delay"]
        # NOTE(review): "filter_status" is not set in the visible __init__ --
        # presumably set elsewhere before this is called; verify
        filter = self.analysis_meta["filter_status"]  # shadows builtin filter()
        if filter:
            print("filtering single movements")
            rawMVs_filt = Filters.filter_singlemov(self.rawMVs) #don't change rawMVs! repeated loading would vary it each time
        else:
            rawMVs_filt = self.rawMVs
        # convert px/frame into microns/s: undo scaling, apply px size and frame rate
        self.unitMVs = (rawMVs_filt / scalingfactor) * self.videometa["microns_per_px"] * (self.videometa["fps"] / delay)
        self.absMotions = np.sqrt(self.unitMVs[:,0]*self.unitMVs[:,0] + self.unitMVs[:,1]*self.unitMVs[:,1])# get absolute motions per frame
        self.get_mean_absMotion()
        self.prepare_quiver_components()
        # calc_TimeAveragedMotion is defined elsewhere in this class (not visible here)
        self.calc_TimeAveragedMotion()
        self.PeakDetection.set_data(self.timeindex, self.mean_absMotions) #or pass self directly?
    def get_mean_absMotion(self):
        """
        Calculates a movement mask (eliminates all pixels where no movement
        occurs through all frames), applies it to absMotions and calculates
        the mean motion per frame; also builds the matching time index.
        """
        # move into filter module in future?
        summed_absMotions = np.sum(self.absMotions, axis = 0) # select only points in array with nonzero movement
        movement_mask = (summed_absMotions == 0)
        filtered_absMotions = np.copy(self.absMotions) #copy needed, don't influence absMotions
        filtered_absMotions[:,movement_mask] = np.nan
        # nanmean ignores the masked (nan) pixels per frame
        self.mean_absMotions = np.nanmean(filtered_absMotions, axis=(1,2))
        self.analysis_meta["results_folder"].mkdir(parents = True, exist_ok = True) #create folder for results if it doesn't exist
        # time axis in seconds, rounded to 2 digits
        self.timeindex = (np.arange(self.mean_absMotions.shape[0]) / self.videometa["fps"]).round(2)
        #np.save(str(self.analysis_meta["results_folder"] / 'beating_kinetics.npy'), np.array([self.timeindex,self.mean_absMotions]))
        #save in own function if desired...
    def prepare_quiver_components(self):
        '''
        Prepare MV components for quiver plotting:
        sets all 0-motions to nan such that they won't be plotted,
        determines scale_max and cuts off all longer vectors,
        creates the grid of arrow base coordinates (block centers).
        '''
        self.MV_zerofiltered = Filters.zeromotion_to_nan(self.unitMVs, copy=True)
        scale_max = helpfunctions.get_scale_maxMotion2(self.absMotions)
        MV_cutoff = Filters.cutoffMVs(self.MV_zerofiltered, max_length = scale_max) #copy=True
        self.QuiverMotionX = MV_cutoff[:,0,:,:] # changed name to QuiverMotionX as values are manipulated here
        self.QuiverMotionY = MV_cutoff[:,1,:,:]
        bw = self.analysis_meta["MV_parameters"]["blockwidth"]
        Nx, Ny = MV_cutoff[0,0].shape
        # arrow bases sit at the center of each blockwidth x blockwidth block
        self.MotionCoordinatesX, self.MotionCoordinatesY = np.meshgrid(
            np.arange(Ny)*bw+bw/2,
            np.arange(Nx)*bw+bw/2) #Nx, Ny exchanged (np vs. image indexing); possible issues with odd bws?
def save_MVs(self):
"""
saves raw MVs as npy file
... replace with functions which pickles all data in class?
"""
results_folder = self.analysis_meta["results_folder"]
results_folder.mkdir(parents = True, exist_ok = True) #create folder for results if it doesn't exist
save_file = str(results_folder / 'rawMVs.npy')
np.save(save_file, self.rawMVs)
save_file_units = str(results_folder / 'unitMVs.npy')
np.save(save_file_units, self.unitMVs)
#def plot_scalebar(self):
# moved to module: helpfunctions.insert_scalebar(imageStack, videometa, analysis_meta)
def save_heatmap(self, singleframe):
savepath = self.analysis_meta["results_folder"]/'heatmap_results'
plotfunctions.save_heatmap(ohw_dataset = self, savepath = savepath, singleframe=singleframe)
def save_heatmap_thread(self, singleframe):
savepath = self.analysis_meta["results_folder"]/'heatmap_results'
self.thread_save_heatmap = helpfunctions.turn_function_into_thread(
plotfunctions.save_heatmap, ohw_dataset = self, savepath = savepath, singleframe=False)
return self.thread_save_heatmap
def save_quiver3(self, singleframe, skipquivers = 1):
savepath = self.analysis_meta["results_folder"]/'quiver_results'
plotfunctions.save_quiver3(ohw_dataset = self, savepath = savepath, singleframe=singleframe, skipquivers=skipquivers)
def save_quiver3_thread(self, singleframe, skipquivers):#t_cut
savepath = self.analysis_meta["results_folder"]/'quiver_results'
self.thread_save_quiver3 = helpfunctions.turn_function_into_thread(
plotfunctions.save_quiver3, ohw_dataset = self, savepath = savepath, singleframe = singleframe, skipquivers=skipquivers)#t_cut=t_cut
return self.thread_save_quiver3
def save_quiver_thread(self, singleframe, skipquivers, t_cut):
self.thread_save_quiver = helpfunctions.turn_function_into_thread(self.save_quiver, singleframe=False, skipquivers=skipquivers, t_cut=t_cut)
return self.thread_save_quiver
def cut_clip(self, clip_full, t_cut=0):
#if user chose to cut the clip after t_cut seconds:
t_cut = round(t_cut, 2)
if t_cut is not 0:
#t_cut is the end of the clip in seconds of the original clip
return clip_full.subclip(t_start=0, t_end=t_cut)
else:
return clip_full
def set_peaks(self, Peaks):
''' update with manually added/ deleted peaks '''
self.PeakDetection.set_peaks(Peaks)
self.order_peaks()
def detect_peaks(self, ratio, number_of_neighbours):
''' automated peak detection in mean_absMotions'''
self.PeakDetection.detect_peaks(ratio, number_of_neighbours)
self.order_peaks()
def order_peaks(self):
self.PeakDetection.order_peaks()
def get_peaks(self):
return self.PeakDetection.Peaks, self.PeakDetection.hipeaks, self.PeakDetection.lopeaks
def get_peakstatistics(self):
self.PeakDetection.calc_peakstatistics()
return self.PeakDetection.get_peakstatistics()
def export_analysis(self):
self.PeakDetection.export_analysis(self.analysis_meta["results_folder"])
def plot_beatingKinetics(self, filename=None):
if filename == None:
filename=self.analysis_meta["results_folder"]/ 'beating_kinetics.png'
plotfunctions.plot_Kinetics(self.timeindex, self.mean_absMotions, self.kinplot_options,
self.PeakDetection.hipeaks, self.PeakDetection.lopeaks, filename)
def init_kinplot_options(self):
self.kinplot_options = dict(self.config._sections['KINPLOT OPTIONS'])
for key, value in self.kinplot_options.items(): # can be definitely improved...
if value == "None":
self.kinplot_options[key] = None
elif value == "true":
self.kinplot_options[key] = True
elif value == "false":
self.kinplot_options[key] = False
else:
self.kinplot_options[key] = float(value)
def set_kinplot_options(self, kinplot_options):
self.kinplot_options = kinplot_options
#also change config to new value?
def calc_TimeAveragedMotion(self):
''' calculates time averaged motion for abs. motion, x- and y-motion '''
self.avg_absMotion = np.nanmean(self.absMotions, axis = 0)
MotionX = self.unitMVs[:,0,:,:]
MotionY = self.unitMVs[:,1,:,:] #squeeze not necessary anymore, dimension reduced
absMotionX = np.abs(MotionX) #calculate mean of absolute values!
self.avg_MotionX = np.nanmean(absMotionX, axis = 0)
absMotionY = np.abs(MotionY)
self.avg_MotionY = np.nanmean(absMotionY, axis = 0)
self.max_avgMotion = np.max ([self.avg_absMotion, self.avg_MotionX, self.avg_MotionY]) # avg_absMotion should be enough
def plot_TimeAveragedMotions(self, file_ext='.png'):
plotfunctions.plot_TimeAveragedMotions(self, file_ext)
def createROIImageStack(self, r):
#r are coordinates of rectangular that was selected as ROI
# print(r)
self.ROIImageStack = []
for idx in range(0, self.rawImageStack.shape[0]):
image_ROI = self.rawImageStack[idx][int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
self.ROIImageStack.append(image_ROI)
self.ROIImageStack = np.asarray(self.ROIImageStack)
# self.ROIImageStack = [img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])] for img in self.rawImageStack.tolist()]
# self.ROIImageStack = np.asarray(self.ROIImageStack)
if __name__ == "__main__":
OHW = OHW() | [
"pickle.dump",
"numpy.sum",
"numpy.abs",
"libraries.plotfunctions.plot_TimeAveragedMotions",
"libraries.helpfunctions.scale_ImageStack",
"pickle.load",
"numpy.arange",
"libraries.helpfunctions.read_config",
"numpy.nanmean",
"numpy.copy",
"libraries.videoreader.import_video",
"numpy.max",
"li... | [((1581, 1610), 'libraries.PeakDetection.PeakDetection', 'PeakDetection.PeakDetection', ([], {}), '()\n', (1608, 1610), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((1840, 1867), 'libraries.helpfunctions.read_config', 'helpfunctions.read_config', ([], {}), '()\n', (1865, 1867), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((2497, 2532), 'libraries.videoreader.import_video', 'videoreader.import_video', (['inputpath'], {}), '(inputpath)\n', (2521, 2532), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((2808, 2914), 'libraries.helpfunctions.turn_function_into_thread', 'helpfunctions.turn_function_into_thread', (['self.import_video'], {'emit_progSignal': '(True)', 'inputpath': 'inputpath'}), '(self.import_video, emit_progSignal=\n True, inputpath=inputpath)\n', (2847, 2914), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((3117, 3152), 'libraries.videoreader.import_video', 'videoreader.import_video', (['inputpath'], {}), '(inputpath)\n', (3141, 3152), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((3261, 3346), 'libraries.helpfunctions.turn_function_into_thread', 'helpfunctions.turn_function_into_thread', (['self.reload_video'], {'emit_progSignal': '(True)'}), '(self.reload_video, emit_progSignal=True\n )\n', (3300, 3346), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((8508, 8610), 'libraries.helpfunctions.turn_function_into_thread', 'helpfunctions.turn_function_into_thread', (['self.calculate_motion'], {'emit_progSignal': '(True)'}), '(self.calculate_motion,\n emit_progSignal=True, **parameters)\n', (8547, 8610), False, 'from libraries import OFlowCalc, Filters, 
plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((9465, 9560), 'numpy.sqrt', 'np.sqrt', (['(self.unitMVs[:, 0] * self.unitMVs[:, 0] + self.unitMVs[:, 1] * self.\n unitMVs[:, 1])'], {}), '(self.unitMVs[:, 0] * self.unitMVs[:, 0] + self.unitMVs[:, 1] * self\n .unitMVs[:, 1])\n', (9472, 9560), True, 'import numpy as np\n'), ((10130, 10161), 'numpy.sum', 'np.sum', (['self.absMotions'], {'axis': '(0)'}), '(self.absMotions, axis=0)\n', (10136, 10161), True, 'import numpy as np\n'), ((10308, 10332), 'numpy.copy', 'np.copy', (['self.absMotions'], {}), '(self.absMotions)\n', (10315, 10332), True, 'import numpy as np\n'), ((10461, 10505), 'numpy.nanmean', 'np.nanmean', (['filtered_absMotions'], {'axis': '(1, 2)'}), '(filtered_absMotions, axis=(1, 2))\n', (10471, 10505), True, 'import numpy as np\n'), ((11242, 11292), 'libraries.Filters.zeromotion_to_nan', 'Filters.zeromotion_to_nan', (['self.unitMVs'], {'copy': '(True)'}), '(self.unitMVs, copy=True)\n', (11267, 11292), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((11314, 11365), 'libraries.helpfunctions.get_scale_maxMotion2', 'helpfunctions.get_scale_maxMotion2', (['self.absMotions'], {}), '(self.absMotions)\n', (11348, 11365), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((11395, 11456), 'libraries.Filters.cutoffMVs', 'Filters.cutoffMVs', (['self.MV_zerofiltered'], {'max_length': 'scale_max'}), '(self.MV_zerofiltered, max_length=scale_max)\n', (11412, 11456), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((12407, 12438), 'numpy.save', 'np.save', (['save_file', 'self.rawMVs'], {}), '(save_file, self.rawMVs)\n', (12414, 12438), True, 'import numpy as np\n'), ((12511, 12549), 'numpy.save', 'np.save', (['save_file_units', 'self.unitMVs'], {}), '(save_file_units, self.unitMVs)\n', (12518, 12549), True, 'import numpy 
as np\n'), ((12815, 12908), 'libraries.plotfunctions.save_heatmap', 'plotfunctions.save_heatmap', ([], {'ohw_dataset': 'self', 'savepath': 'savepath', 'singleframe': 'singleframe'}), '(ohw_dataset=self, savepath=savepath, singleframe\n =singleframe)\n', (12841, 12908), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((13074, 13201), 'libraries.helpfunctions.turn_function_into_thread', 'helpfunctions.turn_function_into_thread', (['plotfunctions.save_heatmap'], {'ohw_dataset': 'self', 'savepath': 'savepath', 'singleframe': '(False)'}), '(plotfunctions.save_heatmap,\n ohw_dataset=self, savepath=savepath, singleframe=False)\n', (13113, 13201), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((13401, 13519), 'libraries.plotfunctions.save_quiver3', 'plotfunctions.save_quiver3', ([], {'ohw_dataset': 'self', 'savepath': 'savepath', 'singleframe': 'singleframe', 'skipquivers': 'skipquivers'}), '(ohw_dataset=self, savepath=savepath, singleframe\n =singleframe, skipquivers=skipquivers)\n', (13427, 13519), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((13703, 13865), 'libraries.helpfunctions.turn_function_into_thread', 'helpfunctions.turn_function_into_thread', (['plotfunctions.save_quiver3'], {'ohw_dataset': 'self', 'savepath': 'savepath', 'singleframe': 'singleframe', 'skipquivers': 'skipquivers'}), '(plotfunctions.save_quiver3,\n ohw_dataset=self, savepath=savepath, singleframe=singleframe,\n skipquivers=skipquivers)\n', (13742, 13865), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((14036, 14154), 'libraries.helpfunctions.turn_function_into_thread', 'helpfunctions.turn_function_into_thread', (['self.save_quiver'], {'singleframe': '(False)', 'skipquivers': 'skipquivers', 't_cut': 't_cut'}), '(self.save_quiver, 
singleframe=False,\n skipquivers=skipquivers, t_cut=t_cut)\n', (14075, 14154), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((15637, 15799), 'libraries.plotfunctions.plot_Kinetics', 'plotfunctions.plot_Kinetics', (['self.timeindex', 'self.mean_absMotions', 'self.kinplot_options', 'self.PeakDetection.hipeaks', 'self.PeakDetection.lopeaks', 'filename'], {}), '(self.timeindex, self.mean_absMotions, self.\n kinplot_options, self.PeakDetection.hipeaks, self.PeakDetection.lopeaks,\n filename)\n', (15664, 15799), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((16666, 16701), 'numpy.nanmean', 'np.nanmean', (['self.absMotions'], {'axis': '(0)'}), '(self.absMotions, axis=0)\n', (16676, 16701), True, 'import numpy as np\n'), ((16871, 16886), 'numpy.abs', 'np.abs', (['MotionX'], {}), '(MotionX)\n', (16877, 16886), True, 'import numpy as np\n'), ((16954, 16984), 'numpy.nanmean', 'np.nanmean', (['absMotionX'], {'axis': '(0)'}), '(absMotionX, axis=0)\n', (16964, 16984), True, 'import numpy as np\n'), ((17011, 17026), 'numpy.abs', 'np.abs', (['MotionY'], {}), '(MotionY)\n', (17017, 17026), True, 'import numpy as np\n'), ((17055, 17085), 'numpy.nanmean', 'np.nanmean', (['absMotionY'], {'axis': '(0)'}), '(absMotionY, axis=0)\n', (17065, 17085), True, 'import numpy as np\n'), ((17120, 17184), 'numpy.max', 'np.max', (['[self.avg_absMotion, self.avg_MotionX, self.avg_MotionY]'], {}), '([self.avg_absMotion, self.avg_MotionX, self.avg_MotionY])\n', (17126, 17184), True, 'import numpy as np\n'), ((17292, 17346), 'libraries.plotfunctions.plot_TimeAveragedMotions', 'plotfunctions.plot_TimeAveragedMotions', (['self', 'file_ext'], {}), '(self, file_ext)\n', (17330, 17346), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((17752, 17782), 'numpy.asarray', 'np.asarray', (['self.ROIImageStack'], {}), 
'(self.ROIImageStack)\n', (17762, 17782), True, 'import numpy as np\n'), ((2189, 2212), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2210, 2212), False, 'import tifffile, cv2, datetime, pickle\n'), ((5730, 5808), 'libraries.helpfunctions.scale_ImageStack', 'helpfunctions.scale_ImageStack', (['self.analysisImageStack'], {'px_longest': 'px_longest'}), '(self.analysisImageStack, px_longest=px_longest)\n', (5760, 5808), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((6678, 6744), 'pickle.dump', 'pickle.dump', (['savedata', 'writefile'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(savedata, writefile, protocol=pickle.HIGHEST_PROTOCOL)\n', (6689, 6744), False, 'import tifffile, cv2, datetime, pickle\n'), ((7140, 7161), 'pickle.load', 'pickle.load', (['loadfile'], {}), '(loadfile)\n', (7151, 7161), False, 'import tifffile, cv2, datetime, pickle\n'), ((8023, 8115), 'libraries.OFlowCalc.BM_stack', 'OFlowCalc.BM_stack', (['self.analysisImageStack'], {'progressSignal': 'progressSignal'}), '(self.analysisImageStack, progressSignal=progressSignal,\n **parameters)\n', (8041, 8115), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((9150, 9187), 'libraries.Filters.filter_singlemov', 'Filters.filter_singlemov', (['self.rawMVs'], {}), '(self.rawMVs)\n', (9174, 9187), False, 'from libraries import OFlowCalc, Filters, plotfunctions, helpfunctions, PeakDetection, videoreader\n'), ((10674, 10714), 'numpy.arange', 'np.arange', (['self.mean_absMotions.shape[0]'], {}), '(self.mean_absMotions.shape[0])\n', (10683, 10714), True, 'import numpy as np\n'), ((11850, 11863), 'numpy.arange', 'np.arange', (['Ny'], {}), '(Ny)\n', (11859, 11863), True, 'import numpy as np\n'), ((11886, 11899), 'numpy.arange', 'np.arange', (['Nx'], {}), '(Nx)\n', (11895, 11899), True, 'import numpy as np\n')] |
import sys
import os
import pytest
from numpy import array, array_equal, allclose
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lxmls.readers import galton
tolerance = 1e-5
@pytest.fixture(scope='module')
def galton_data():
return galton.load()
def test_galton_data(galton_data):
mean = galton_data.mean(0)
expected_mean = array([68.30818966, 68.08846983])
assert allclose(mean, expected_mean, tolerance)
std = galton_data.std(0)
expected_std = array([1.78637014, 2.51658435])
assert allclose(std, expected_std, tolerance)
n, bins, _ = plt.hist(galton_data)
expected_n = [array([ 0., 14., 23., 66., 289., 219., 183., 68., 43., 23.]), array([ 12., 32., 107., 117., 138., 120., 167., 163., 41., 31.])]
expected_bins = array([61.7, 62.9, 64.1, 65.3, 66.5, 67.7, 68.9, 70.1, 71.3, 72.5, 73.7])
assert allclose(n, expected_n, tolerance)
assert allclose(bins, expected_bins, tolerance)
if __name__ == '__main__':
pytest.main([__file__])
| [
"matplotlib.pyplot.hist",
"numpy.allclose",
"pytest.fixture",
"pytest.main",
"matplotlib.use",
"numpy.array",
"lxmls.readers.galton.load"
] | [((101, 122), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (115, 122), False, 'import matplotlib\n'), ((210, 240), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (224, 240), False, 'import pytest\n'), ((271, 284), 'lxmls.readers.galton.load', 'galton.load', ([], {}), '()\n', (282, 284), False, 'from lxmls.readers import galton\n'), ((373, 406), 'numpy.array', 'array', (['[68.30818966, 68.08846983]'], {}), '([68.30818966, 68.08846983])\n', (378, 406), False, 'from numpy import array, array_equal, allclose\n'), ((418, 458), 'numpy.allclose', 'allclose', (['mean', 'expected_mean', 'tolerance'], {}), '(mean, expected_mean, tolerance)\n', (426, 458), False, 'from numpy import array, array_equal, allclose\n'), ((508, 539), 'numpy.array', 'array', (['[1.78637014, 2.51658435]'], {}), '([1.78637014, 2.51658435])\n', (513, 539), False, 'from numpy import array, array_equal, allclose\n'), ((551, 589), 'numpy.allclose', 'allclose', (['std', 'expected_std', 'tolerance'], {}), '(std, expected_std, tolerance)\n', (559, 589), False, 'from numpy import array, array_equal, allclose\n'), ((608, 629), 'matplotlib.pyplot.hist', 'plt.hist', (['galton_data'], {}), '(galton_data)\n', (616, 629), True, 'import matplotlib.pyplot as plt\n'), ((806, 879), 'numpy.array', 'array', (['[61.7, 62.9, 64.1, 65.3, 66.5, 67.7, 68.9, 70.1, 71.3, 72.5, 73.7]'], {}), '([61.7, 62.9, 64.1, 65.3, 66.5, 67.7, 68.9, 70.1, 71.3, 72.5, 73.7])\n', (811, 879), False, 'from numpy import array, array_equal, allclose\n'), ((891, 925), 'numpy.allclose', 'allclose', (['n', 'expected_n', 'tolerance'], {}), '(n, expected_n, tolerance)\n', (899, 925), False, 'from numpy import array, array_equal, allclose\n'), ((937, 977), 'numpy.allclose', 'allclose', (['bins', 'expected_bins', 'tolerance'], {}), '(bins, expected_bins, tolerance)\n', (945, 977), False, 'from numpy import array, array_equal, allclose\n'), ((1011, 1034), 'pytest.main', 'pytest.main', 
(['[__file__]'], {}), '([__file__])\n', (1022, 1034), False, 'import pytest\n'), ((648, 717), 'numpy.array', 'array', (['[0.0, 14.0, 23.0, 66.0, 289.0, 219.0, 183.0, 68.0, 43.0, 23.0]'], {}), '([0.0, 14.0, 23.0, 66.0, 289.0, 219.0, 183.0, 68.0, 43.0, 23.0])\n', (653, 717), False, 'from numpy import array, array_equal, allclose\n'), ((717, 790), 'numpy.array', 'array', (['[12.0, 32.0, 107.0, 117.0, 138.0, 120.0, 167.0, 163.0, 41.0, 31.0]'], {}), '([12.0, 32.0, 107.0, 117.0, 138.0, 120.0, 167.0, 163.0, 41.0, 31.0])\n', (722, 790), False, 'from numpy import array, array_equal, allclose\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
import glob
import re
import multiprocessing as mp
from parameters import ovfParms
class OvfFile:
def __init__(self, path, parms=None):
self._path = path
if parms is None:
self._parms = ovfParms()
else:
self._parms = parms
if self._path.split(".")[-1] == "npz":
self.array, self.headers, self.time = self.load_npz(self._path)
print("Data loaded successfully from ", path)
else:
if os.path.isdir(self._path):
self.headers = self.load_file(self.get_files_names()[0])[1]
self.array, self.time = self.parse_dir()
else:
self.array, self.headers = self.load_file(self._path)
self.array = self.parse_array(self.array)
self.time = self.headers['Desc']
if any((self._parms.getParms["tStart"], self._parms.getParms["tStop"],
self._parms.getParms["zStart"], self._parms.getParms["zStop"],
self._parms.getParms["yStart"], self._parms.getParms["yStop"],
self._parms.getParms["xStart"], self._parms.getParms["xStop"])):
self.array = self.array[self._parms.getParms["zStart"]:self._parms.getParms["zStop"],
self._parms.getParms["yStart"]:self._parms.getParms["yStop"],
self._parms.getParms["xStart"]:self._parms.getParms["xStop"],
:]
def __eq__(self, other):
return np.allclose(self, other)
def load_npz(self, path):
with np.load(path) as data:
return data["array"], data["headers"][()], data["time"]
def parse_dir(self):
file_list = self.get_files_names()
print("Reading folder: " + self._path + "/" +
self._parms.getParms["head"] + '*.ovf')
print("N of files to process: ", len(file_list))
print("Available nodes (n-1): " + str(int(mp.cpu_count() - 1)))
pool = mp.Pool(processes=int(mp.cpu_count() - 1))
array, time = zip(*[(i, j["Desc"]) for i, j in pool.map(self.load_file, file_list)])
pool.close()
pool.join()
array = np.array(array, dtype=np.float32).reshape([
len(file_list),
int(self.headers["znodes"]),
int(self.headers["ynodes"]),
int(self.headers["xnodes"]),
int(self.headers["valuedim"]),
])
print("Matrix shape:", array.shape)
return array, np.array(time)
def get_files_names(self):
file_list = glob.glob(
self._path + "/" + self._parms.getParms["head"] + '*.ovf')[
::self._parms.getParms["nStep"]] # files filtering
return sorted(file_list, key=lambda x: int(re.findall(r'\d+', x)[-1]))[
self._parms.getParms["tStart"]:self._parms.getParms["tStop"]]
def load_file(self, path):
with open(path, 'rb') as f:
a = self.catch_headers(f)
out_arr = np.fromfile(f, '<f4', count=int(a['znodes'] * a['ynodes'] * a['xnodes'] * a['valuedim'] + 1))
return out_arr[1:], a
def catch_headers(self, file):
headers = {}
capture_keys = ("xmin:", "ymin:", "zmin:", "xmin:", "ymin:", "zmin:", "xstepsize:",
"ystepsize:", "zstepsize:", "xnodes:", "ynodes:", "znodes:", "valuedim:", "Desc:")
while True:
a = file.readline().strip().decode('ASCII')
a = a.split()
if a[1] in capture_keys:
headers[a[1][:-1]] = float(a[-2]) if a[-1] is 's' else float(a[-1])
elif a[2] == 'Data':
break
return headers
def parse_array(self, arr):
return arr.reshape(1,
int(self.headers['znodes']),
int(self.headers['ynodes']),
int(self.headers['xnodes']),
int(self.headers['valuedim']))
def getarray_size(self):
znodes = int(self.headers['znodes'])
ynodes = int(self.headers['ynodes'])
xnodes = int(self.headers['xnodes'])
nOfComp = int(self.headers['valuedim'])
return xnodes * ynodes * znodes * nOfComp + 1
def save(self, path=None):
if path is None:
path = os.path.dirname(os.path.realpath(self._path)) + "/arr.npz"
np.savez(path, array=self.array, headers=self.headers,
path=self._path, time=self.time)
print("Data saved to the ", path)
@property
def avgtime(self):
if os.path.isdir(self._path.split("arr.npz")[0]):
return (self.time[-1] - self.time[0]) / len(self.time)
else:
return self.time
@property
def shape(self):
return self.array.shape
@property
def geom_shape(self):
return self.array.shape[1:4]
@property
def x(self):
return self.array[0, 0, :, :, 0]
@property
def y(self):
return self.array[0, 0, :, :, 1]
@property
def z(self):
return self.array[0, 0, :, :, 2]
if __name__ == "__main__":
pass
| [
"numpy.load",
"os.path.isdir",
"numpy.allclose",
"os.path.realpath",
"parameters.ovfParms",
"re.findall",
"numpy.array",
"glob.glob",
"numpy.savez",
"multiprocessing.cpu_count"
] | [((1600, 1624), 'numpy.allclose', 'np.allclose', (['self', 'other'], {}), '(self, other)\n', (1611, 1624), True, 'import numpy as np\n'), ((4557, 4648), 'numpy.savez', 'np.savez', (['path'], {'array': 'self.array', 'headers': 'self.headers', 'path': 'self._path', 'time': 'self.time'}), '(path, array=self.array, headers=self.headers, path=self._path,\n time=self.time)\n', (4565, 4648), True, 'import numpy as np\n'), ((286, 296), 'parameters.ovfParms', 'ovfParms', ([], {}), '()\n', (294, 296), False, 'from parameters import ovfParms\n'), ((563, 588), 'os.path.isdir', 'os.path.isdir', (['self._path'], {}), '(self._path)\n', (576, 588), False, 'import os\n'), ((1672, 1685), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1679, 1685), True, 'import numpy as np\n'), ((2621, 2635), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (2629, 2635), True, 'import numpy as np\n'), ((2691, 2759), 'glob.glob', 'glob.glob', (["(self._path + '/' + self._parms.getParms['head'] + '*.ovf')"], {}), "(self._path + '/' + self._parms.getParms['head'] + '*.ovf')\n", (2700, 2759), False, 'import glob\n'), ((2296, 2329), 'numpy.array', 'np.array', (['array'], {'dtype': 'np.float32'}), '(array, dtype=np.float32)\n', (2304, 2329), True, 'import numpy as np\n'), ((4505, 4533), 'os.path.realpath', 'os.path.realpath', (['self._path'], {}), '(self._path)\n', (4521, 4533), False, 'import os\n'), ((2119, 2133), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2131, 2133), True, 'import multiprocessing as mp\n'), ((2057, 2071), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2069, 2071), True, 'import multiprocessing as mp\n'), ((2900, 2921), 're.findall', 're.findall', (['"""\\\\d+"""', 'x'], {}), "('\\\\d+', x)\n", (2910, 2921), False, 'import re\n')] |
"""Copyright (c) Microsoft Corporation. Licensed under the MIT license.
Uniter for RE model
"""
from collections import defaultdict
import torch
from torch import nn
import random
import numpy as np
from .layer import GELU
from .model import UniterPreTrainedModel, UniterModel
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
except ImportError:
logger.info('Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .')
from torch.nn import LayerNorm
class UniterForReferringExpressionComprehension(UniterPreTrainedModel):
"""Finetune UNITER for RE."""
def __init__(self, config, img_dim, loss='cls', margin=0.2, hard_ratio=0.3, mlp=1):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
if mlp == 1:
self.re_output = nn.Linear(config.hidden_size, 1)
elif mlp == 2:
self.re_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size), GELU(), LayerNorm(config.hidden_size, eps=1e-12),
nn.Linear(config.hidden_size, 1))
else:
raise ValueError('MLP restricted to be 1 or 2 layers.')
self.loss = loss
assert self.loss in ['cls', 'rank']
if self.loss == 'rank':
self.margin = margin
self.hard_ratio = hard_ratio
else:
self.crit = nn.CrossEntropyLoss(reduction='none')
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
obj_masks = batch['obj_masks']
sequence_output = self.uniter(
input_ids, position_ids, img_feat, img_pos_feat, attn_masks, gather_index, output_all_encoded_layers=False)
# get only the region part
txt_lens, num_bbs = batch['txt_lens'], batch['num_bbs']
sequence_output = self._get_image_hidden(sequence_output, txt_lens, num_bbs)
# re score (n, max_num_bb)
scores = self.re_output(sequence_output).squeeze(2)
scores = scores.masked_fill(obj_masks, -1e4) # mask out non-objects
if compute_loss:
targets = batch['targets']
if self.loss == 'cls':
ce_loss = self.crit(scores, targets.squeeze(-1)) # (n, ) as no reduction
return ce_loss
else:
# ranking
_n = len(num_bbs)
# positive (target)
pos_ix = targets
pos_sc = scores.gather(1, pos_ix.view(_n, 1)) # (n, 1)
pos_sc = torch.sigmoid(pos_sc).view(-1) # (n, ) sc[0, 1]
# negative
neg_ix = self.sample_neg_ix(scores, targets, num_bbs)
neg_sc = scores.gather(1, neg_ix.view(_n, 1)) # (n, 1)
neg_sc = torch.sigmoid(neg_sc).view(-1) # (n, ) sc[0, 1]
# ranking
mm_loss = torch.clamp(self.margin + neg_sc - pos_sc, 0) # (n, )
return mm_loss
else:
# (n, max_num_bb)
return scores
def sample_neg_ix(self, scores, targets, num_bbs):
"""
Inputs:
:scores (n, max_num_bb)
:targets (n, )
:num_bbs list of [num_bb]
return:
:neg_ix (n, ) easy/hard negative (!= target)
"""
neg_ix = []
cand_ixs = torch.argsort(scores, dim=-1, descending=True) # (n, num_bb)
for i in range(len(num_bbs)):
num_bb = num_bbs[i]
if np.random.uniform(0, 1, 1) < self.hard_ratio:
# sample hard negative, w/ highest score
for ix in cand_ixs[i].tolist():
if ix != targets[i]:
assert ix < num_bb, f'ix={ix}, num_bb={num_bb}'
neg_ix.append(ix)
break
else:
# sample easy negative, i.e., random one
ix = random.randint(0, num_bb - 1) # [0, num_bb-1]
while ix == targets[i]:
ix = random.randint(0, num_bb - 1)
neg_ix.append(ix)
neg_ix = torch.tensor(neg_ix).type(targets.type())
assert neg_ix.numel() == targets.numel()
return neg_ix
def _get_image_hidden(self, sequence_output, txt_lens, num_bbs):
"""
Extracting the img_hidden part from sequence_output.
Inputs:
- sequence_output: (n, txt_len+num_bb, hid_size)
- txt_lens : [txt_len]
- num_bbs : [num_bb]
Output:
- img_hidden : (n, max_num_bb, hid_size)
"""
outputs = []
max_bb = max(num_bbs)
hid_size = sequence_output.size(-1)
for seq_out, len_, nbb in zip(sequence_output.split(1, dim=0), txt_lens, num_bbs):
img_hid = seq_out[:, len_:len_ + nbb, :]
if nbb < max_bb:
img_hid = torch.cat([img_hid, self._get_pad(img_hid, max_bb - nbb, hid_size)], dim=1)
outputs.append(img_hid)
img_hidden = torch.cat(outputs, dim=0)
return img_hidden
def _get_pad(self, t, len_, hidden_size):
pad = torch.zeros(1, len_, hidden_size, dtype=t.dtype, device=t.device)
return pad
| [
"numpy.random.uniform",
"random.randint",
"torch.argsort",
"torch.cat",
"torch.nn.CrossEntropyLoss",
"collections.defaultdict",
"torch.nn.LayerNorm",
"torch.sigmoid",
"torch.clamp",
"torch.nn.Linear",
"torch.zeros",
"torch.tensor"
] | [((1566, 1599), 'collections.defaultdict', 'defaultdict', (['(lambda : None)', 'batch'], {}), '(lambda : None, batch)\n', (1577, 1599), False, 'from collections import defaultdict\n'), ((3675, 3721), 'torch.argsort', 'torch.argsort', (['scores'], {'dim': '(-1)', 'descending': '(True)'}), '(scores, dim=-1, descending=True)\n', (3688, 3721), False, 'import torch\n'), ((5358, 5383), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(0)'}), '(outputs, dim=0)\n', (5367, 5383), False, 'import torch\n'), ((5471, 5536), 'torch.zeros', 'torch.zeros', (['(1)', 'len_', 'hidden_size'], {'dtype': 't.dtype', 'device': 't.device'}), '(1, len_, hidden_size, dtype=t.dtype, device=t.device)\n', (5482, 5536), False, 'import torch\n'), ((861, 893), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', '(1)'], {}), '(config.hidden_size, 1)\n', (870, 893), False, 'from torch import nn\n'), ((1423, 1460), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (1442, 1460), False, 'from torch import nn\n'), ((3216, 3261), 'torch.clamp', 'torch.clamp', (['(self.margin + neg_sc - pos_sc)', '(0)'], {}), '(self.margin + neg_sc - pos_sc, 0)\n', (3227, 3261), False, 'import torch\n'), ((3822, 3848), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3839, 3848), True, 'import numpy as np\n'), ((4254, 4283), 'random.randint', 'random.randint', (['(0)', '(num_bb - 1)'], {}), '(0, num_bb - 1)\n', (4268, 4283), False, 'import random\n'), ((4447, 4467), 'torch.tensor', 'torch.tensor', (['neg_ix'], {}), '(neg_ix)\n', (4459, 4467), False, 'import torch\n'), ((977, 1026), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (986, 1026), False, 'from torch import nn\n'), ((1036, 1076), 'torch.nn.LayerNorm', 'LayerNorm', (['config.hidden_size'], {'eps': '(1e-12)'}), '(config.hidden_size, eps=1e-12)\n', (1045, 1076), False, 'from 
torch.nn import LayerNorm\n'), ((1094, 1126), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', '(1)'], {}), '(config.hidden_size, 1)\n', (1103, 1126), False, 'from torch import nn\n'), ((4366, 4395), 'random.randint', 'random.randint', (['(0)', '(num_bb - 1)'], {}), '(0, num_bb - 1)\n', (4380, 4395), False, 'import random\n'), ((2872, 2893), 'torch.sigmoid', 'torch.sigmoid', (['pos_sc'], {}), '(pos_sc)\n', (2885, 2893), False, 'import torch\n'), ((3115, 3136), 'torch.sigmoid', 'torch.sigmoid', (['neg_sc'], {}), '(neg_sc)\n', (3128, 3136), False, 'import torch\n')] |
import numpy as np
# importing from alphaBetaLab the needed components
from alphaBetaLab.abOptionManager import abOptions
from alphaBetaLab.abEstimateAndSave import triMeshSpecFromMshFile, abEstimateAndSaveTriangularEtopo1

# definition of the spectral grid: 25 directions over the full circle and
# 25 geometrically spaced frequencies starting from minfrq
dirs = np.linspace(0, 2*np.pi, 25)
nfreq = 25
minfrq = .04118
frqfactor = 1.1
freqs = [minfrq*(frqfactor**i) for i in range(1,nfreq + 1)]

# definition of the spatial mesh (unstructured triangular, Gmsh .msh format)
gridname = 'ww3'
mshfile = 'med.msh'
triMeshSpec = triMeshSpecFromMshFile(mshfile)

# path of the etopo1 bathymetry
etopoFilePath = '/home/lmentaschi/usr/WaveWatchIII/gridgen1.1/reference_data/etopo1.nc'

# output directory
outputDestDir = './output/'

# number of cores for parallel computing
# (a dead `nParWorker = 12` assignment that was immediately overwritten
# has been removed)
nParWorker = 4

# this option indicates that the computation should be skipped for cells
# smaller than 3 km
minSizeKm = 3
opt = abOptions(minSizeKm=minSizeKm)

# instruction to do the computation and save the output
abEstimateAndSaveTriangularEtopo1(dirs, freqs, gridname, triMeshSpec, etopoFilePath, outputDestDir, nParWorker, abOptions=opt)
| [
"alphaBetaLab.abEstimateAndSave.abEstimateAndSaveTriangularEtopo1",
"alphaBetaLab.abOptionManager.abOptions",
"alphaBetaLab.abEstimateAndSave.triMeshSpecFromMshFile",
"numpy.linspace"
] | [((266, 295), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(25)'], {}), '(0, 2 * np.pi, 25)\n', (277, 295), True, 'import numpy as np\n'), ((482, 513), 'alphaBetaLab.abEstimateAndSave.triMeshSpecFromMshFile', 'triMeshSpecFromMshFile', (['mshfile'], {}), '(mshfile)\n', (504, 513), False, 'from alphaBetaLab.abEstimateAndSave import triMeshSpecFromMshFile, abEstimateAndSaveTriangularEtopo1\n'), ((868, 898), 'alphaBetaLab.abOptionManager.abOptions', 'abOptions', ([], {'minSizeKm': 'minSizeKm'}), '(minSizeKm=minSizeKm)\n', (877, 898), False, 'from alphaBetaLab.abOptionManager import abOptions\n'), ((956, 1086), 'alphaBetaLab.abEstimateAndSave.abEstimateAndSaveTriangularEtopo1', 'abEstimateAndSaveTriangularEtopo1', (['dirs', 'freqs', 'gridname', 'triMeshSpec', 'etopoFilePath', 'outputDestDir', 'nParWorker'], {'abOptions': 'opt'}), '(dirs, freqs, gridname, triMeshSpec,\n etopoFilePath, outputDestDir, nParWorker, abOptions=opt)\n', (989, 1086), False, 'from alphaBetaLab.abEstimateAndSave import triMeshSpecFromMshFile, abEstimateAndSaveTriangularEtopo1\n')] |
import numpy as np
import scipy
import scipy.misc
import os
def save_img(img, dir, name, count):
    """Tile a batch of 64x64 RGB images into a square grid and save as JPEG.

    Args:
        img: batch of images; must expose ``.data.cpu().numpy()`` (i.e. a
            torch tensor) with shape (n*n, 3, 64, 64) — TODO confirm shape
            assumption against callers.
        dir: output directory; created if it does not exist yet.
        name: suffix used in the output file name.
        count: numeric prefix used in the output file name.
    """
    # exist_ok avoids the race between the original isdir() check and
    # makedirs(), and replaces the non-idiomatic `... is False` comparison.
    os.makedirs(dir, exist_ok=True)
    # Grid side length: the batch size is assumed to be a perfect square.
    n = int(np.sqrt(img.shape[0]))
    # (N, C, H, W) -> (N, H, W, C) layout for image writing
    img = img.data.cpu().numpy().transpose(0, 2, 3, 1)
    out_img = np.zeros((64 * n, 64 * n, 3))
    for r in range(n):
        for c in range(n):
            out_img[r * 64:(r + 1) * 64, c * 64:(c + 1) * 64, :] = img[r * n + c]
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2;
    # imageio.imwrite is the usual drop-in replacement on modern installs.
    scipy.misc.imsave(os.path.join(dir, str(count) + '_' + name + '.jpg'), out_img)
| [
"os.path.isdir",
"numpy.zeros",
"os.makedirs",
"numpy.sqrt"
] | [((260, 289), 'numpy.zeros', 'np.zeros', (['(64 * n, 64 * n, 3)'], {}), '((64 * n, 64 * n, 3))\n', (268, 289), True, 'import numpy as np\n'), ((105, 123), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (118, 123), False, 'import os\n'), ((142, 158), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (153, 158), False, 'import os\n'), ((171, 192), 'numpy.sqrt', 'np.sqrt', (['img.shape[0]'], {}), '(img.shape[0])\n', (178, 192), True, 'import numpy as np\n')] |
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
def compare_filters(iir_b, iir_a, fir_b, fs=1):
    """Plot the magnitude responses of an IIR filter and its FIR approximation.

    Args:
        iir_b: numerator coefficients of the IIR filter.
        iir_a: denominator coefficients of the IIR filter.
        fir_b: coefficients of the approximating FIR filter.
        fs: sampling rate used to scale the frequency axis (default 1).
    """
    # Frequency responses of both filters (2048 points for the IIR one)
    freq_iir, resp_iir = scipy.signal.freqz(iir_b, iir_a, fs=fs, worN=2048)
    freq_fir, resp_fir = scipy.signal.freqz(fir_b, fs=fs)

    # Magnitudes in dB; the small additive offset avoids log10(0)
    mag_iir_db = 20 * np.log10(np.abs(resp_iir) + 1e-8)
    mag_fir_db = 20 * np.log10(np.abs(resp_fir) + 1e-8)

    plt.plot(freq_iir, mag_iir_db, label="IIR filter")
    plt.plot(freq_fir, mag_fir_db, label="FIR approx. filter")
    plt.xscale('log')
    plt.ylim([-50, 10])
    plt.xlim([10, 22.05e3])
    plt.xlabel("Freq. (Hz)")
    plt.ylabel("Mag. (dB)")
    plt.legend()
    plt.grid()
    plt.show()
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((426, 471), 'matplotlib.pyplot.plot', 'plt.plot', (['w_iir', 'h_iir_db'], {'label': '"""IIR filter"""'}), "(w_iir, h_iir_db, label='IIR filter')\n", (434, 471), True, 'import matplotlib.pyplot as plt\n'), ((476, 529), 'matplotlib.pyplot.plot', 'plt.plot', (['w_fir', 'h_fir_db'], {'label': '"""FIR approx. filter"""'}), "(w_fir, h_fir_db, label='FIR approx. filter')\n", (484, 529), True, 'import matplotlib.pyplot as plt\n'), ((534, 551), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (544, 551), True, 'import matplotlib.pyplot as plt\n'), ((556, 575), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-50, 10]'], {}), '([-50, 10])\n', (564, 575), True, 'import matplotlib.pyplot as plt\n'), ((580, 603), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[10, 22050.0]'], {}), '([10, 22050.0])\n', (588, 603), True, 'import matplotlib.pyplot as plt\n'), ((608, 632), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq. (Hz)"""'], {}), "('Freq. (Hz)')\n", (618, 632), True, 'import matplotlib.pyplot as plt\n'), ((637, 660), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mag. (dB)"""'], {}), "('Mag. (dB)')\n", (647, 660), True, 'import matplotlib.pyplot as plt\n'), ((665, 677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (675, 677), True, 'import matplotlib.pyplot as plt\n'), ((682, 692), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (690, 692), True, 'import matplotlib.pyplot as plt\n'), ((697, 707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (705, 707), True, 'import matplotlib.pyplot as plt\n'), ((352, 365), 'numpy.abs', 'np.abs', (['h_iir'], {}), '(h_iir)\n', (358, 365), True, 'import numpy as np\n'), ((401, 414), 'numpy.abs', 'np.abs', (['h_fir'], {}), '(h_fir)\n', (407, 414), True, 'import numpy as np\n')] |
from .petitradtrans import petitRADTRANSModel
import numpy as np
from taurex.exceptions import InvalidModelException
from taurex.core import fitparam
class DirectImageRADTRANS(petitRADTRANSModel):
    """Direct-imaging (emission) forward model backed by petitRADTRANS."""
    @classmethod
    def input_keywords(self):
        # Keywords under which this model can be selected in input files.
        return ['directimage-petitrad', 'direct-petitrad', ]
    def build_atmosphere_object(self):
        # Plain Radtrans object; no extra geometry arguments are passed for
        # the direct-imaging case.
        return self._radtrans.Radtrans(line_species=self.linespecies, \
            rayleigh_species = self.rayleigh_species, \
            continuum_opacities = self.continuum_species, \
            wlen_bords_micron = self._wlen_micron)
    def path_integral(self, wngrid, return_contrib):
        """Compute the emission flux, cut to the requested wavenumber grid.

        Returns the flux converted to W/m^2/um plus an all-zero
        (nLayers, len(wngrid)) array (no contribution function is computed).
        """
        from taurex.constants import RJUP,RSOL
        import astropy.units as u
        abundances, temperature, MMW, Rp, gravity, p0bar = self.setup_parameters()
        self.info('Molecular abundances at surface: %s',[ (k,v[-1]) for k,v in abundances.items()])
        self.info('Temperature at surface %s',temperature[-1])
        self.info('MMw at surface %s',MMW[-1])
        self.info('Planet radius: %s',Rp)
        self.info('Gravity in cm/2 at surface: %s',gravity)
        self.info('P0 = radius: %s',p0bar)
        self._atmosphere.calc_flux(temperature, abundances, gravity, MMW)
        # Reverse the petitRADTRANS flux so it lines up with the ascending
        # native wavenumber grid used below.
        integral = self._atmosphere.flux[::-1]
        # petitRADTRANS flux units: erg / cm^2 / s / Hz
        petit_flux_u = integral * u.erg / u.cm**2 / u.s / u.Hz
        # Convert to W/m^2/um via astropy's spectral_density equivalency;
        # u.k is the kayser unit (cm^-1), presumably the units of the native
        # wavenumber grid — TODO confirm.
        petit_flux_W = petit_flux_u.to(u.W/u.m**2/u.um, equivalencies=u.spectral_density(self.nativeWavenumberGrid*u.k)).value
        #print(integral)
        # Restrict the native grid to the requested wavenumber window.
        native = self.nativeWavenumberGrid
        native_filt = (native >= wngrid.min()) & (native <= wngrid.max())
        petit_flux_W = petit_flux_W[native_filt]
        # NaNs in the flux indicate a numerically invalid parameter set.
        if np.any(np.isnan(petit_flux_W)):
            raise InvalidModelException
        return petit_flux_W, np.zeros(shape=(self.nLayers,wngrid.shape[0])) | [
"astropy.units.spectral_density",
"numpy.zeros",
"numpy.isnan"
] | [((1719, 1741), 'numpy.isnan', 'np.isnan', (['petit_flux_W'], {}), '(petit_flux_W)\n', (1727, 1741), True, 'import numpy as np\n'), ((1814, 1861), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.nLayers, wngrid.shape[0])'}), '(shape=(self.nLayers, wngrid.shape[0]))\n', (1822, 1861), True, 'import numpy as np\n'), ((1451, 1502), 'astropy.units.spectral_density', 'u.spectral_density', (['(self.nativeWavenumberGrid * u.k)'], {}), '(self.nativeWavenumberGrid * u.k)\n', (1469, 1502), True, 'import astropy.units as u\n')] |
import unittest
import numpy as np
from utils import gaussian_mixture, add_outliers
class MyTestCase(unittest.TestCase):
    """Sanity checks for the synthetic-data helpers in ``utils``."""

    def test_gaussian_mixture(self):
        """gaussian_mixture returns 100 2-D points including 10 far outliers."""
        centers = np.array([[1, 1],
                            [1, -1],
                            [-1, 1],
                            [-1, -1]])
        samples = gaussian_mixture(n_samples=100, n_clusters=4,
                                   n_outliers=10, n_features=2,
                                   means=centers,
                                   outliers_dist_factor=100)
        assert samples.shape == (100, 2)
        ordered = np.sort(np.linalg.norm(samples, axis=1))
        # the 10 outliers must dwarf the largest inlier by the dist factor
        assert ordered[-10] > 10 * ordered[-11]

    def test_add_outliers(self):
        """add_outliers appends 10 far-away rows and reports their indices."""
        centers = np.array([[1, 1],
                            [1, -1],
                            [-1, 1],
                            [-1, -1]])
        samples = gaussian_mixture(n_samples=100, n_clusters=4,
                                   n_outliers=0, n_features=2,
                                   means=centers)
        assert samples.shape == (100, 2)
        samples, outlier_idxs = add_outliers(samples, n_outliers=10,
                                              dist_factor=100,
                                              return_index=True)
        assert samples.shape == (110, 2)
        ordered = np.sort(np.linalg.norm(samples, axis=1))
        assert ordered[-10] > 10 * ordered[-11]
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"unittest.main",
"utils.add_outliers",
"numpy.linalg.norm",
"numpy.array"
] | [((1339, 1354), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1352, 1354), False, 'import unittest\n'), ((1109, 1175), 'utils.add_outliers', 'add_outliers', (['X'], {'n_outliers': '(10)', 'dist_factor': '(100)', 'return_index': '(True)'}), '(X, n_outliers=10, dist_factor=100, return_index=True)\n', (1121, 1175), False, 'from utils import gaussian_mixture, add_outliers\n'), ((609, 634), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (623, 634), True, 'import numpy as np\n'), ((1235, 1260), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1249, 1260), True, 'import numpy as np\n'), ((312, 358), 'numpy.array', 'np.array', (['[[1, 1], [1, -1], [-1, 1], [-1, -1]]'], {}), '([[1, 1], [1, -1], [-1, 1], [-1, -1]])\n', (320, 358), True, 'import numpy as np\n'), ((864, 910), 'numpy.array', 'np.array', (['[[1, 1], [1, -1], [-1, 1], [-1, -1]]'], {}), '([[1, 1], [1, -1], [-1, 1], [-1, -1]])\n', (872, 910), True, 'import numpy as np\n')] |
"""TEsting functions
"""
import numpy as np
from energy_demand.basic import basic_functions
from energy_demand.basic import lookup_tables
def test_if_minus_value_in_array(arraytotest):#, tolerance_min_max=0.00000000001):
"""Test if array has negative value according to a tolerance
criteria
Arguments
---------
arraytotest : array
Input to test for negative values
tolerance_min_max : float
Minimum and maximum tolerance criteria
Returns
-------
bool : Whether empty number in array or not
"""
only_neg_elements = arraytotest[arraytotest < 0]
if len(only_neg_elements) > 0:
#for element in only_neg_elements:
#if (element > tolerance_min_max) or (element < tolerance_min_max):
#if element < 0:
print("---")
print("Sum of all negative: " + str(np.sum(only_neg_elements)))
print("Average negative value: " + str(np.sum(only_neg_elements) / len(only_neg_elements)))
print("Smalles value: " + str(np.min(only_neg_elements)))
return True
else:
return False
def switch_testing(fuel_switches, service_switches, capacity_switches):
    """Collect, per enduse, the sectors affected by service and capacity switches.

    The two switch lists were previously processed by two byte-identical
    loops; they are now handled by one shared helper.

    Arguments
    ---------
    fuel_switches : list
        Fuel switches (currently not inspected; kept for interface
        compatibility with callers)
    service_switches : list
        Service switches
    capacity_switches : list
        Capacity switches

    Returns
    -------
    dict
        enduse -> list of affected sectors, or None if the enduse was first
        seen without a sector (i.e. it applies across all sectors)
    """
    all_switches_incl_sectors = {}

    def _collect_sectors(switches):
        # Record each switch's sector under its enduse; a falsy sector on the
        # first occurrence marks the enduse as sector-less (None).
        for switch in switches:
            if switch.enduse not in all_switches_incl_sectors:
                if not switch.sector:
                    all_switches_incl_sectors[switch.enduse] = None
                else:
                    all_switches_incl_sectors[switch.enduse] = {switch.sector}
            else:
                if switch.sector:
                    all_switches_incl_sectors[switch.enduse].add(switch.sector)

    _collect_sectors(service_switches)
    _collect_sectors(capacity_switches)

    # Convert the collected sector sets to lists (None stays None)
    for enduse in all_switches_incl_sectors:
        if all_switches_incl_sectors[enduse] is not None:
            all_switches_incl_sectors[enduse] = list(all_switches_incl_sectors[enduse])

    return all_switches_incl_sectors
def testing_fuel_tech_shares(fuel_tech_fueltype_p):
    """Test that assigned technology fuel shares sum to 1 within each fueltype.

    The identical validation previously duplicated for the sector and
    non-sector branches is factored into one helper.

    Arguments
    ---------
    fuel_tech_fueltype_p : dict
        Fueltype fraction of technologies, keyed either as
        enduse > fueltype > tech or enduse > sector > fueltype > tech

    Raises
    ------
    Exception
        If the shares of any non-empty fueltype do not sum to 1.0
        (rounded to 3 decimals)
    """
    def _check_shares(fueltype_shares, enduse):
        # Validate that every non-empty fueltype's tech shares sum to 1
        for fueltype in fueltype_shares:
            if fueltype_shares[fueltype] != {}:
                if round(sum(fueltype_shares[fueltype].values()), 3) != 1.0:
                    raise Exception(
                        "The fuel shares assumptions are wrong for enduse {} and fueltype {} SUM: {}".format(
                            enduse, fueltype, sum(fueltype_shares[fueltype].values())))

    for enduse in fuel_tech_fueltype_p:
        # Sector-resolved enduses carry one extra nesting level
        sector_crit = basic_functions.test_if_sector(
            fuel_tech_fueltype_p[enduse])

        if sector_crit:
            for sector in fuel_tech_fueltype_p[enduse]:
                _check_shares(fuel_tech_fueltype_p[enduse][sector], enduse)
        else:
            _check_shares(fuel_tech_fueltype_p[enduse], enduse)
def testing_tech_defined(technologies, all_tech_enduse):
"""Test if all technologies are defined for assigned fuels
Arguments
----------
technologies : dict
Technologies
all_tech_enduse : dict
All technologies per enduse with assigned fuel shares
"""
for enduse in all_tech_enduse:
for sector in all_tech_enduse[enduse]:
for tech in all_tech_enduse[enduse][sector]:
if tech not in technologies:
raise Exception(
"Error: '{}' is not defined in technology_definition.csv".format(
tech))
def test_function_fuel_sum(
        data,
        fuel_disagg,
        mode_constrained,
        space_heating_enduses
    ):
    """ Sum raw disaggregated fuel data over all regions and submodels.

    Arguments
    ---------
    data : dict
        Model data container (not read in this function; kept for
        interface compatibility)
    fuel_disagg : dict
        Disaggregated fuel, keyed submodel > region > enduse > sector >
        fueltype array
    mode_constrained : bool
        If False, fuel of space heating enduses is accumulated separately
        in ``tot_heating`` instead of the per-fueltype totals
    space_heating_enduses : list
        Enduses considered space heating

    Returns
    -------
    dict
        Overall total (``fuel_in``), per-fueltype totals (``fuel_in_*``)
        and total heating fuel (``tot_heating``)
    """
    lookups = lookup_tables.basic_lookups()
    # Accumulators for the overall and per-fueltype fuel totals
    fuel_in = 0
    fuel_in_solid_fuel = 0
    fuel_in_gas = 0
    fuel_in_elec = 0
    fuel_in_oil = 0
    fuel_in_heat = 0
    fuel_in_hydrogen = 0
    fuel_in_biomass = 0
    tot_heating = 0
    # Residential fuel is stored under a single dummy (None) sector key
    # — TODO confirm this matches the disaggregation output structure.
    dummy_sector = None
    for region in fuel_disagg['residential']:
        for enduse in fuel_disagg['residential'][region]:
            fuel_in += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector])
            fuel_in_heat += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['heat']])
            if mode_constrained == False and enduse in space_heating_enduses: # heating summed separately, not per fueltype
                tot_heating += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector])
            else:
                fuel_in_elec += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['electricity']])
                fuel_in_gas += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['gas']])
                fuel_in_hydrogen += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['hydrogen']])
                fuel_in_oil += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['oil']])
                fuel_in_solid_fuel += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['solid_fuel']])
                fuel_in_biomass += np.sum(fuel_disagg['residential'][region][enduse][dummy_sector][lookups['fueltypes']['biomass']])

    # Service fuel is resolved per sector
    for region in fuel_disagg['service']:
        for enduse in fuel_disagg['service'][region]:
            for sector in fuel_disagg['service'][region][enduse]:
                fuel_in += np.sum(fuel_disagg['service'][region][enduse][sector])
                fuel_in_heat += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['heat']])
                if mode_constrained == False and enduse in space_heating_enduses:
                    tot_heating += np.sum(fuel_disagg['service'][region][enduse][sector])
                else:
                    fuel_in_elec += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['electricity']])
                    fuel_in_gas += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['gas']])
                    fuel_in_hydrogen += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['hydrogen']])
                    fuel_in_oil += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['oil']])
                    fuel_in_solid_fuel += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['solid_fuel']])
                    fuel_in_biomass += np.sum(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['biomass']])

    # Industry fuel is resolved per sector as well
    for region in fuel_disagg['industry']:
        for enduse in fuel_disagg['industry'][region]:
            for sector in fuel_disagg['industry'][region][enduse]:
                fuel_in += np.sum(fuel_disagg['industry'][region][enduse][sector])
                fuel_in_heat += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['heat']])
                if mode_constrained == False and enduse in space_heating_enduses:
                    tot_heating += np.sum(fuel_disagg['industry'][region][enduse][sector])
                else:
                    fuel_in_elec += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['electricity']])
                    fuel_in_gas += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['gas']])
                    fuel_in_hydrogen += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['hydrogen']])
                    fuel_in_oil += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['oil']])
                    fuel_in_solid_fuel += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['solid_fuel']])
                    fuel_in_biomass += np.sum(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['biomass']])

    out_dict = {
        "fuel_in": fuel_in,
        "fuel_in_biomass": fuel_in_biomass,
        "fuel_in_elec": fuel_in_elec,
        "fuel_in_gas": fuel_in_gas,
        "fuel_in_heat": fuel_in_heat,
        "fuel_in_hydrogen": fuel_in_hydrogen,
        "fuel_in_solid_fuel": fuel_in_solid_fuel,
        "fuel_in_oil": fuel_in_oil,
        "tot_heating": tot_heating}

    return out_dict
def control_disaggregation(fuel_disagg, national_fuel, enduses, sectors):
    """Check that regional disaggregation preserves the national fuel total.

    Arguments
    ---------
    fuel_disagg : dict
        Disaggregated fuel per region > enduse > sector
    national_fuel : dict
        National fuel per enduse > sector
    enduses : list
        Enduses to include in the national total
    sectors : list
        Sectors to include in the national total
    """
    # Total of everything that was disaggregated to the regions
    disagg_total = 0
    for reg_fuels in fuel_disagg.values():
        for enduse_fuels in reg_fuels.values():
            for sector_fuel in enduse_fuels.values():
                disagg_total += np.sum(sector_fuel)

    # National total over the requested enduse/sector combinations
    national_total = 0
    for sector in sectors:
        for enduse in enduses:
            national_total += np.sum(national_fuel[enduse][sector])

    # Both totals must agree (to 2 decimals), otherwise fuel was lost or
    # created during disaggregation
    np.testing.assert_almost_equal(
        disagg_total,
        national_total,
        decimal=2, err_msg="disagregation error ss {} {}".format(
            disagg_total, national_total))
| [
"energy_demand.basic.lookup_tables.basic_lookups",
"energy_demand.basic.basic_functions.test_if_sector",
"numpy.sum",
"numpy.min"
] | [((5408, 5437), 'energy_demand.basic.lookup_tables.basic_lookups', 'lookup_tables.basic_lookups', ([], {}), '()\n', (5435, 5437), False, 'from energy_demand.basic import lookup_tables\n'), ((3401, 3461), 'energy_demand.basic.basic_functions.test_if_sector', 'basic_functions.test_if_sector', (['fuel_tech_fueltype_p[enduse]'], {}), '(fuel_tech_fueltype_p[enduse])\n', (3431, 3461), False, 'from energy_demand.basic import basic_functions\n'), ((5783, 5847), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector])\n", (5789, 5847), True, 'import numpy as np\n'), ((5876, 5975), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['heat']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['heat']])\n", (5882, 5975), True, 'import numpy as np\n'), ((10717, 10754), 'numpy.sum', 'np.sum', (['national_fuel[enduse][sector]'], {}), '(national_fuel[enduse][sector])\n', (10723, 10754), True, 'import numpy as np\n'), ((6109, 6173), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector])\n", (6115, 6173), True, 'import numpy as np\n'), ((6224, 6330), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['electricity']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['electricity']])\n", (6230, 6330), True, 'import numpy as np\n'), ((6357, 6455), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['gas']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['gas']])\n", (6363, 6455), True, 'import numpy as np\n'), ((6487, 6590), 'numpy.sum', 'np.sum', 
(["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['hydrogen']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['hydrogen']])\n", (6493, 6590), True, 'import numpy as np\n'), ((6617, 6715), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['oil']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['oil']])\n", (6623, 6715), True, 'import numpy as np\n'), ((6749, 6854), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['solid_fuel']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['solid_fuel']])\n", (6755, 6854), True, 'import numpy as np\n'), ((6885, 6987), 'numpy.sum', 'np.sum', (["fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['biomass']]"], {}), "(fuel_disagg['residential'][region][enduse][dummy_sector][lookups[\n 'fueltypes']['biomass']])\n", (6891, 6987), True, 'import numpy as np\n'), ((7173, 7227), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector]"], {}), "(fuel_disagg['service'][region][enduse][sector])\n", (7179, 7227), True, 'import numpy as np\n'), ((7260, 7349), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['heat']]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'heat']])\n", (7266, 7349), True, 'import numpy as np\n'), ((8504, 8559), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector]"], {}), "(fuel_disagg['industry'][region][enduse][sector])\n", (8510, 8559), True, 'import numpy as np\n'), ((8592, 8682), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['heat']]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n ['heat']])\n", (8598, 8682), True, 
'import numpy as np\n'), ((10581, 10621), 'numpy.sum', 'np.sum', (['fuel_disagg[reg][enduse][sector]'], {}), '(fuel_disagg[reg][enduse][sector])\n', (10587, 10621), True, 'import numpy as np\n'), ((865, 890), 'numpy.sum', 'np.sum', (['only_neg_elements'], {}), '(only_neg_elements)\n', (871, 890), True, 'import numpy as np\n'), ((1042, 1067), 'numpy.min', 'np.min', (['only_neg_elements'], {}), '(only_neg_elements)\n', (1048, 1067), True, 'import numpy as np\n'), ((7463, 7517), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector]"], {}), "(fuel_disagg['service'][region][enduse][sector])\n", (7469, 7517), True, 'import numpy as np\n'), ((7576, 7672), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'electricity']]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'electricity']])\n", (7582, 7672), True, 'import numpy as np\n'), ((7703, 7791), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['gas']]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'gas']])\n", (7709, 7791), True, 'import numpy as np\n'), ((7827, 7920), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['hydrogen']\n ]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'hydrogen']])\n", (7833, 7920), True, 'import numpy as np\n'), ((7951, 8039), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['oil']]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'oil']])\n", (7957, 8039), True, 'import numpy as np\n'), ((8077, 8172), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'solid_fuel']]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'solid_fuel']])\n", (8083, 8172), True, 'import numpy as np\n'), ((8207, 
8299), 'numpy.sum', 'np.sum', (["fuel_disagg['service'][region][enduse][sector][lookups['fueltypes']['biomass']]"], {}), "(fuel_disagg['service'][region][enduse][sector][lookups['fueltypes'][\n 'biomass']])\n", (8213, 8299), True, 'import numpy as np\n'), ((8796, 8851), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector]"], {}), "(fuel_disagg['industry'][region][enduse][sector])\n", (8802, 8851), True, 'import numpy as np\n'), ((8910, 9007), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes'][\n 'electricity']]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n ['electricity']])\n", (8916, 9007), True, 'import numpy as np\n'), ((9038, 9127), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['gas']]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n ['gas']])\n", (9044, 9127), True, 'import numpy as np\n'), ((9163, 9257), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes'][\n 'hydrogen']]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n ['hydrogen']])\n", (9169, 9257), True, 'import numpy as np\n'), ((9288, 9377), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['oil']]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n ['oil']])\n", (9294, 9377), True, 'import numpy as np\n'), ((9415, 9511), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes'][\n 'solid_fuel']]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n ['solid_fuel']])\n", (9421, 9511), True, 'import numpy as np\n'), ((9546, 9639), 'numpy.sum', 'np.sum', (["fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']['biomass']\n ]"], {}), "(fuel_disagg['industry'][region][enduse][sector][lookups['fueltypes']\n 
['biomass']])\n", (9552, 9639), True, 'import numpy as np\n'), ((941, 966), 'numpy.sum', 'np.sum', (['only_neg_elements'], {}), '(only_neg_elements)\n', (947, 966), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import MkldnnAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
class TestMkldnnMatmulv2Op(MkldnnAutoScanTest):
    """Auto-scan test for the oneDNN (MKL-DNN) matmul_v2 kernel.

    Hypothesis generates transpose flags and shape parameters; each sampled
    program is run with the MKL-DNN predictor and compared against the
    reference execution by the auto-scan framework.
    """

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # Reject shape combinations whose batch (dim -4) or channel (dim -3)
        # sizes can neither match nor broadcast (i.e. differ and neither is 1).
        if len(program_config.inputs["input_data2"].shape) == 4:
            if program_config.inputs["input_data1"].shape[
                -4] != 1 and program_config.inputs["input_data2"].shape[
                    -4] != 1:
                if program_config.inputs["input_data1"].shape[
                    -4] != program_config.inputs["input_data2"].shape[-4]:
                    return False
            if program_config.inputs["input_data1"].shape[
                -3] != 1 and program_config.inputs["input_data2"].shape[
                    -3] != 1:
                if program_config.inputs["input_data1"].shape[
                    -3] != program_config.inputs["input_data2"].shape[-3]:
                    return False
        return True

    def sample_program_configs(self, *args, **kwargs):
        def generate_input(type, *args, **kwargs):
            # `type` selects which operand to generate ("x" or "y");
            # note it shadows the builtin of the same name.
            transpose_X = kwargs["transpose_X"]
            transpose_Y = kwargs["transpose_Y"]
            batch_size1 = kwargs["batch_size1"]
            batch_size2 = kwargs["batch_size2"]
            channel1 = kwargs["channel1"]
            channel2 = kwargs["channel2"]
            input_dim = kwargs["input_dim"]
            y_dim_len = kwargs["y_dim_len"]
            # Pick operand shapes so the contracted dimension (input_dim)
            # lines up for the given transpose flag combination; Y may be
            # 3-D or 4-D depending on y_dim_len.
            if transpose_X and transpose_Y:
                shape_x = [batch_size1, channel1, input_dim, 32]
                if y_dim_len == 4:
                    shape_y = [batch_size2, channel2, 64, input_dim]
                elif y_dim_len == 3:
                    shape_y = [channel2, 64, input_dim]
            elif transpose_X:
                shape_x = [batch_size1, channel1, input_dim, 32]
                if y_dim_len == 4:
                    shape_y = [batch_size2, channel2, input_dim, 64]
                elif y_dim_len == 3:
                    shape_y = [channel2, input_dim, 64]
            elif transpose_Y:
                shape_x = [batch_size1, channel1, 32, input_dim]
                if y_dim_len == 4:
                    shape_y = [batch_size2, channel2, 8, input_dim]
                elif y_dim_len == 3:
                    shape_y = [channel2, 8, input_dim]
            else:
                shape_x = [batch_size1, channel1, 32, input_dim]
                if y_dim_len == 4:
                    shape_y = [batch_size2, channel2, input_dim, 16]
                elif y_dim_len == 3:
                    shape_y = [channel2, input_dim, 16]
            if type == "x":
                return np.random.random(shape_x).astype(np.float32)
            else:
                return np.random.random(shape_y).astype(np.float32)

        # Single matmul_v2 op; the fused_* attributes are left empty since
        # no fusion pass is exercised here.
        matmul_op = OpConfig(
            type="matmul_v2",
            inputs={"X": ["input_data1"],
                    "Y": ["input_data2"]},
            outputs={"Out": ["matmul_output"]},
            attrs={
                "trans_x": kwargs["transpose_X"],
                "trans_y": kwargs["transpose_Y"],
                "fused_reshape_X": [],
                "fused_reshape_Y": [],
                "fused_transpose_X": [],
                "fused_transpose_Y": [],
                "fused_reshape_Out": [],
                "fused_transpose_Out": []
            })

        program_config = ProgramConfig(
            ops=[matmul_op],
            weights={},
            inputs={
                "input_data1": TensorConfig(data_gen=partial(
                    generate_input, "x", *args, **kwargs)),
                "input_data2": TensorConfig(data_gen=partial(
                    generate_input, "y", *args, **kwargs))
            },
            outputs=["matmul_output"])

        yield program_config

    def sample_predictor_configs(self, program_config):
        # MKL-DNN predictor with absolute/relative tolerances of 1e-5.
        config = self.create_inference_config(use_mkldnn=True)
        yield config, (1e-5, 1e-5)

    @given(
        transpose_X=st.booleans(),
        transpose_Y=st.booleans(),
        y_dim_len=st.sampled_from([3, 4]),
        batch_size1=st.integers(
            min_value=1, max_value=4),
        batch_size2=st.integers(
            min_value=1, max_value=4),
        channel1=st.sampled_from([1, 16, 32, 64]),
        channel2=st.sampled_from([1, 16, 32, 64]),
        input_dim=st.sampled_from([16, 32, 64]))
    def test(self, *args, **kwargs):
        # Entry point driven by hypothesis with the sampled parameters.
        self.run_test(*args, **kwargs)
if __name__ == "__main__":
    # Run the auto-scan test suite when this file is executed directly.
    unittest.main()
| [
"unittest.main",
"functools.partial",
"program_config.OpConfig",
"hypothesis.strategies.sampled_from",
"hypothesis.strategies.booleans",
"numpy.random.random",
"hypothesis.strategies.integers"
] | [((5410, 5425), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5423, 5425), False, 'import unittest\n'), ((3732, 4092), 'program_config.OpConfig', 'OpConfig', ([], {'type': '"""matmul_v2"""', 'inputs': "{'X': ['input_data1'], 'Y': ['input_data2']}", 'outputs': "{'Out': ['matmul_output']}", 'attrs': "{'trans_x': kwargs['transpose_X'], 'trans_y': kwargs['transpose_Y'],\n 'fused_reshape_X': [], 'fused_reshape_Y': [], 'fused_transpose_X': [],\n 'fused_transpose_Y': [], 'fused_reshape_Out': [], 'fused_transpose_Out': []\n }"}), "(type='matmul_v2', inputs={'X': ['input_data1'], 'Y': [\n 'input_data2']}, outputs={'Out': ['matmul_output']}, attrs={'trans_x':\n kwargs['transpose_X'], 'trans_y': kwargs['transpose_Y'],\n 'fused_reshape_X': [], 'fused_reshape_Y': [], 'fused_transpose_X': [],\n 'fused_transpose_Y': [], 'fused_reshape_Out': [], 'fused_transpose_Out':\n []})\n", (3740, 4092), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((4913, 4926), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (4924, 4926), True, 'import hypothesis.strategies as st\n'), ((4948, 4961), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (4959, 4961), True, 'import hypothesis.strategies as st\n'), ((4981, 5004), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[3, 4]'], {}), '([3, 4])\n', (4996, 5004), True, 'import hypothesis.strategies as st\n'), ((5026, 5063), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(4)'}), '(min_value=1, max_value=4)\n', (5037, 5063), True, 'import hypothesis.strategies as st\n'), ((5098, 5135), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(4)'}), '(min_value=1, max_value=4)\n', (5109, 5135), True, 'import hypothesis.strategies as st\n'), ((5167, 5199), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[1, 16, 32, 64]'], {}), '([1, 16, 32, 64])\n', (5182, 5199), True, 'import 
hypothesis.strategies as st\n'), ((5218, 5250), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[1, 16, 32, 64]'], {}), '([1, 16, 32, 64])\n', (5233, 5250), True, 'import hypothesis.strategies as st\n'), ((5270, 5299), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[16, 32, 64]'], {}), '([16, 32, 64])\n', (5285, 5299), True, 'import hypothesis.strategies as st\n'), ((3580, 3605), 'numpy.random.random', 'np.random.random', (['shape_x'], {}), '(shape_x)\n', (3596, 3605), True, 'import numpy as np\n'), ((3666, 3691), 'numpy.random.random', 'np.random.random', (['shape_y'], {}), '(shape_y)\n', (3682, 3691), True, 'import numpy as np\n'), ((4451, 4496), 'functools.partial', 'partial', (['generate_input', '"""x"""', '*args'], {}), "(generate_input, 'x', *args, **kwargs)\n", (4458, 4496), False, 'from functools import partial\n'), ((4573, 4618), 'functools.partial', 'partial', (['generate_input', '"""y"""', '*args'], {}), "(generate_input, 'y', *args, **kwargs)\n", (4580, 4618), False, 'from functools import partial\n')] |
#ZADANIE 1
#Napisz program realizujacy poszukiwanie miejsc zerowych
#powyzszych funkcji z punktu a) i b). Wykorzystaj metode graficzna,
#liniowej inkrementacji i bisekcji. Stworz odpowiednie funkcje
#implementujace wymienione metody poszukiwania miejsc zerowych.
#Dobierz odpowiednio obszary wyszukiwania. Wykonaj analize bledow.
#Opisz w sprawozdaniu wnioski.
import numpy as np
import matplotlib.pyplot as plt
def fxA(x):
    """Quintic test polynomial f(x) = 7x^5 + 9x^2 - 5x (works elementwise on arrays)."""
    quintic_term = 7 * x**5
    quadratic_term = 9 * x**2
    linear_term = 5 * x
    return quintic_term + quadratic_term - linear_term
def fxB(x):
    """Rational test function with a sharp peak near x = 0.3.

    NOTE(review): in the second term the `+ 0.04` sits *outside* the
    denominator (unlike the classic humps function); preserved as written.
    """
    peak_term = 1 / ((x - 0.3)**2 + 0.01)
    offset_term = 1 / ((x - 0.8)**2) + 0.04
    return peak_term - offset_term
def linearIncremental(fx, xstart, xd, maxincr):
    """Locate a root of fx by linear incremental search plus one secant step.

    Steps from xstart in increments of xd (at most maxincr steps) until the
    sign of fx changes relative to fx(xstart), then refines the bracketing
    step [probe - xd, probe] with a single false-position correction.

    Raises:
        Exception: when no sign change is found within maxincr increments.
    """
    f0 = fx(xstart)
    probe = xstart
    for step in range(maxincr):
        probe = xstart + step * xd
        if f0 * fx(probe) < 0:
            # Sign change detected: the root lies within the last step.
            break
    if f0 * fx(probe) > 0:
        raise Exception("Nie znaleziono rozwiazania!")
    # Secant (false-position) refinement over the final increment.
    return probe - (xd * fx(probe)) / (fx(probe) - fx(probe - xd))
def bisection(fx, a, b, err):
    """Find a root of fx inside [a, b] by bisection, finishing with a secant step.

    Args:
        fx: callable; must change sign over [a, b] for the search to converge.
        a, b: interval endpoints.
        err: interval width at which bisection stops.

    Returns:
        An approximation of the root.
    """
    while np.absolute(b - a) > err:
        midPoint = (a + b) * 0.5
        fm = fx(midPoint)
        if fm == 0:
            # Exact root hit. Without this guard neither endpoint would be
            # updated (both sign products are 0) and the loop never terminates.
            return midPoint
        if fm * fx(a) < 0:
            b = midPoint
        midPoint = (a + b) * 0.5
        fm = fx(midPoint)
        if fm == 0:
            return midPoint
        if fm * fx(b) < 0:
            a = midPoint
    # Final false-position refinement over the tiny remaining interval.
    fa, fb = fx(a), fx(b)
    if fb == fa:
        # Degenerate secant step (division by zero); fall back to the midpoint.
        return (a + b) * 0.5
    return b - (b - a) * fb / (fb - fa)
def _report_roots(fx, header, marker):
    """Run the graphical, incremental and bisection root demos for one function."""
    print(header)
    print("\nTest metody graficznej")
    grid = np.arange(-3, 3, 0.1)
    plt.plot(grid, fx(grid), marker)
    plt.grid(True)
    plt.show()
    print("\nTest metody inkrementacji")
    # Same three start points as before: -3, 0 and 0.5.
    for label, start in (("x1", -3), ("x2", 0), ("x3", 0.5)):
        err = fx(linearIncremental(fx, start, 0.01, 500))
        print(label + " = ", linearIncremental(fx, start, 0.01, 500), "fx(" + label + ") = ", err)
    print("\nTest metody bisekcji")
    # Same three left endpoints as before: -5, -4 and -3 (right endpoint 1).
    for label, left in (("x1", -5), ("x2", -4), ("x3", -3)):
        print(label + " = ", bisection(fx, left, 1, 0.001))

_report_roots(fxA, "Funkcja A", 'r.')
_report_roots(fxB, "\n\nFunkcja B", 'y.')
#Program umozliwia znalezienie miejsc zerowych funkcji na trzy sposoby:
#- metody graficznej,
#- metody liniowej inkrementacji,
#- metody bisekcji.
#Przedstawione w programie metody znalezienia miejsc zerowych nie sa idealne.
#Metoda liniowej inkrementacji zwraca wysoki blad.
#Metoda bisekcji nie zwraca dokladnych wartosci miejsc zerowych. | [
"numpy.absolute",
"matplotlib.pyplot.show",
"numpy.arange",
"matplotlib.pyplot.grid"
] | [((1245, 1266), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.1)'], {}), '(-3, 3, 0.1)\n', (1254, 1266), True, 'import numpy as np\n'), ((1293, 1307), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1301, 1307), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1318), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1316, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1948, 1969), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.1)'], {}), '(-3, 3, 0.1)\n', (1957, 1969), True, 'import numpy as np\n'), ((1996, 2010), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2004, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((922, 940), 'numpy.absolute', 'np.absolute', (['(b - a)'], {}), '(b - a)\n', (933, 940), True, 'import numpy as np\n')] |
from config_generator import configurator
import numpy as np
from random import uniform, randint
# Algorithms to generate configurations for in this run.
algorithms = ['xstream']
# When False, parameters are read interactively (one dataset name and one
# parameter set per line on stdin) and a config is written per dataset.
TuningMode = True
if TuningMode is False:
    # First read 24 dataset names from stdin.
    names = []
    for i in range(24):
        name = input()
        names.append(name)
    for algo in algorithms:
        if(algo == 'xstream'):
            # xstream takes two integers per dataset: k and chains.
            for i in range(24):
                k = int(input())
                chains = int(input())
                configurator('xstream', [k, chains], str(i), names[i])
        elif(algo == 'hst'):
            # hst takes one integer per dataset: the number of trees.
            for i in range(24):
                trees = int(input())
                configurator('hst', [trees], str(i), names[i])
        elif(algo == 'hstf'):
            # hstf takes trees and a forgetting parameter (kept as strings).
            for i in range(24):
                trees = input()
                forget = input()
                configurator('hstf', [trees, forget], str(i), names[i])
        elif(algo == 'rrcf'):
            for i in range(24):
                trees = int(input())
                forget = input()
                configurator('rrcf', [trees, forget], str(i), names[i])
        elif(algo == 'mcod'):
            # mcod takes a distance threshold and a neighbour count.
            for i in range(24):
                dist = (input())
                neighbor = (input())
                configurator('mcod', [dist, neighbor], str(i), names[i])
        elif(algo == 'loda'):
            # loda has no tunable parameters here.
            # NOTE(review): the config index is the constant '0' rather than
            # str(i) as in every other branch — verify this is intentional.
            for i in range(24):
                configurator('loda', [0, 0], '0', names[i])
    quit()
# Grid-search configuration generation: for each algorithm, build the
# cartesian product of its parameter lists (via meshgrid/dstack) and emit
# one config per parameter pair.
for algo in algorithms:
    if(algo == 'xstream'):
        algorithm = 'xstream'
        chains = [25,50,100]
        k = [25,50,100]
        # meshgrid + dstack yields a (len(k), len(chains), 2) grid of pairs;
        # the loop below flattens its rows into a flat list of [chain, k] pairs.
        xs, ys = np.meshgrid(chains, k)
        tmp = (np.dstack([xs, ys]))
        params = []
        for i in range(1, len(tmp)):
            # NOTE(review): once params is an ndarray, `params == []` relies on
            # NumPy returning plain False for shape-mismatched comparisons
            # (deprecated behaviour); an explicit length check would be safer.
            if(params == []):
                params = np.concatenate((tmp[i-1], tmp[i]))
            else:
                params = np.concatenate((params, tmp[i]))
        i = 0
        for param in params:
            configurator(algorithm, param, str(i))
            i += 1
    elif(algo == 'hst'):
        params = [[50]] #[[25],[50],[100]]
        i = 0
        for param in params:
            configurator('hst', param, str(i))
            i += 1
    elif(algo == 'hstf'):
        n_trees = [50] #[25,50,100]
        forget = [64,128,256,512, 'max']
        xs, ys = np.meshgrid(n_trees, forget)
        tmp = (np.dstack([xs, ys]))
        params = []
        for i in range(1, len(tmp)):
            if(params == []):
                params = np.concatenate((tmp[i-1], tmp[i]))
            else:
                params = np.concatenate((params, tmp[i]))
        i = 0
        for param in params:
            configurator('hstf', param, str(i))
            i += 1
    elif(algo == 'rrcf'):
        n_trees = [50] #[25,50,100]
        forget = ['max', 'max'] #[64,128,256,512, 'max']
        xs, ys = np.meshgrid(n_trees, forget)
        tmp = (np.dstack([xs, ys]))
        params = []
        for i in range(1, len(tmp)):
            if(params == []):
                params = np.concatenate((tmp[i-1], tmp[i]))
            else:
                params = np.concatenate((params, tmp[i]))
        i = 0
        for param in params:
            configurator('rrcf', param, str(i))
            i += 1
    elif(algo == 'mcod'):
        #for i in range(100):
        #    configurator('mcod', [uniform(0.1, 4), randint(1, 64)], str(i))
        # NOTE(review): str(i) here reuses whatever loop index leaked from an
        # earlier branch; if 'mcod' runs alone, `i` is undefined (NameError).
        configurator('mcod', [0.741, 22], str(i))
elif(algo == 'loda'):
configurator('loda', [0, 0], '0') | [
"numpy.dstack",
"numpy.meshgrid",
"numpy.concatenate",
"config_generator.configurator"
] | [((1594, 1616), 'numpy.meshgrid', 'np.meshgrid', (['chains', 'k'], {}), '(chains, k)\n', (1605, 1616), True, 'import numpy as np\n'), ((1633, 1652), 'numpy.dstack', 'np.dstack', (['[xs, ys]'], {}), '([xs, ys])\n', (1642, 1652), True, 'import numpy as np\n'), ((1770, 1806), 'numpy.concatenate', 'np.concatenate', (['(tmp[i - 1], tmp[i])'], {}), '((tmp[i - 1], tmp[i]))\n', (1784, 1806), True, 'import numpy as np\n'), ((1850, 1882), 'numpy.concatenate', 'np.concatenate', (['(params, tmp[i])'], {}), '((params, tmp[i]))\n', (1864, 1882), True, 'import numpy as np\n'), ((2307, 2335), 'numpy.meshgrid', 'np.meshgrid', (['n_trees', 'forget'], {}), '(n_trees, forget)\n', (2318, 2335), True, 'import numpy as np\n'), ((2352, 2371), 'numpy.dstack', 'np.dstack', (['[xs, ys]'], {}), '([xs, ys])\n', (2361, 2371), True, 'import numpy as np\n'), ((2878, 2906), 'numpy.meshgrid', 'np.meshgrid', (['n_trees', 'forget'], {}), '(n_trees, forget)\n', (2889, 2906), True, 'import numpy as np\n'), ((2923, 2942), 'numpy.dstack', 'np.dstack', (['[xs, ys]'], {}), '([xs, ys])\n', (2932, 2942), True, 'import numpy as np\n'), ((2499, 2535), 'numpy.concatenate', 'np.concatenate', (['(tmp[i - 1], tmp[i])'], {}), '((tmp[i - 1], tmp[i]))\n', (2513, 2535), True, 'import numpy as np\n'), ((2579, 2611), 'numpy.concatenate', 'np.concatenate', (['(params, tmp[i])'], {}), '((params, tmp[i]))\n', (2593, 2611), True, 'import numpy as np\n'), ((3060, 3096), 'numpy.concatenate', 'np.concatenate', (['(tmp[i - 1], tmp[i])'], {}), '((tmp[i - 1], tmp[i]))\n', (3074, 3096), True, 'import numpy as np\n'), ((3140, 3172), 'numpy.concatenate', 'np.concatenate', (['(params, tmp[i])'], {}), '((params, tmp[i]))\n', (3154, 3172), True, 'import numpy as np\n'), ((3515, 3548), 'config_generator.configurator', 'configurator', (['"""loda"""', '[0, 0]', '"""0"""'], {}), "('loda', [0, 0], '0')\n", (3527, 3548), False, 'from config_generator import configurator\n'), ((1372, 1415), 'config_generator.configurator', 'configurator', 
(['"""loda"""', '[0, 0]', '"""0"""', 'names[i]'], {}), "('loda', [0, 0], '0', names[i])\n", (1384, 1415), False, 'from config_generator import configurator\n')] |
import numpy as np
def swap(arr, i, j):
    """
    Exchange the elements at two positions of a list, in place.

    Parameters
    ----------
    arr: list
        The array whose elements are exchanged
    i: int
        Index of one element
    j: int
        Index of the other element
    """
    arr[i], arr[j] = arr[j], arr[i]
def merge(x, y, i1, mid, i2):
    """
    Perform a merge of two contiguous sorted sub-chunks of
    the array x, using y as a staging area.

    The chunks x[i1..mid] and x[mid+1..i2] (inclusive) must each be sorted;
    afterwards x[i1..i2] is sorted. The merge is stable: on ties the element
    from the left chunk is taken first.

    Parameters
    ----------
    x: list
        The main array
    y: list
        The array to copy into as the two chunks are being merged
    i1: int
        Left of first chunk
    mid: int
        Right of first chunk
    i2: int
        End of second chunk
    """
    # Stage the whole range so x can be overwritten while y is still read.
    for k in range(i1, i2 + 1):
        y[k] = x[k]
    left = i1        # next unconsumed element of the first chunk
    right = mid + 1  # next unconsumed element of the second chunk
    for k in range(i1, i2 + 1):
        if left > mid:
            # First chunk exhausted: drain the second.
            x[k] = y[right]
            right += 1
        elif right > i2:
            # Second chunk exhausted: drain the first.
            x[k] = y[left]
            left += 1
        elif y[right] < y[left]:
            x[k] = y[right]
            right += 1
        else:
            # Ties favor the left chunk, which keeps the sort stable.
            x[k] = y[left]
            left += 1
def mergesort_rec(x, y, i1, i2):
    """
    Sort the slice x[i1..i2] (both inclusive) recursively.

    Parameters
    ----------
    x: list
        Array being sorted in place
    y: list
        Scratch array used by merge for intermediate results
    i1: int
        First index of the chunk to sort, inclusive
    i2: int
        Last index of the chunk to sort, inclusive (i2 >= i1)
    """
    if i1 == i2:
        # A single element is trivially sorted.
        return
    if i2 - i1 == 1:
        # Two adjacent elements: put them in order directly.
        if x[i2] < x[i1]:
            swap(x, i1, i2)
        return
    # General case: sort each half, then merge the halves.
    mid = (i1 + i2) // 2
    mergesort_rec(x, y, i1, mid)
    mergesort_rec(x, y, mid + 1, i2)
    merge(x, y, i1, mid, i2)
def mergesort(x):
    """
    Sort the list x in place using merge sort.

    Parameters
    ----------
    x: list
        Array to sort
    """
    if not x:
        # Guard: for an empty list the recursive helper is called with
        # i2 = -1, which makes mid = -1 and recurses forever
        # (RecursionError). An empty list is already sorted.
        return
    y = [0]*len(x)
    mergesort_rec(x, y, 0, len(x)-1)
# Demo: sort 20 reproducible random integers and print the result.
np.random.seed(0)  # fixed seed so the demo output is deterministic
x = np.random.randint(0, 100, 20).tolist()  # 20 plain ints drawn from [0, 100)
mergesort(x)  # sorts x in place
print(x)  # expected: ascending order
"numpy.random.randint",
"numpy.random.seed"
] | [((1759, 1776), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1773, 1776), True, 'import numpy as np\n'), ((1781, 1810), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(20)'], {}), '(0, 100, 20)\n', (1798, 1810), True, 'import numpy as np\n')] |
import binascii
import gzip
import json
import logging
import os
import glob
import fnmatch
import shutil
import time
import zlib
import io
import pickle
import tempfile
import re
from abc import ABC, abstractmethod
from collections import OrderedDict
from functools import total_ordering
from typing import Any, List, Optional, Iterable, Callable
import numpy
from azure.core import MatchConditions
from azure.core.exceptions import HttpResponseError
from azure.identity import DefaultAzureCredential
from azure.storage.blob import ContainerClient
from dpu_utils.utils.dataloading import save_json_gz, save_jsonl_gz
try:
import msgpack
except ImportError:
pass # Continue without msgpack support
# Scheme prefix that marks a path string as an Azure blob path.
AZURE_PATH_PREFIX = "azure://"
# Public API of this module.
__all__ = ['RichPath', 'LocalPath', 'AzurePath']
# Silence the very chatty Azure SDK loggers; only errors are surfaced.
logging.getLogger('azure.storage.blob').setLevel(logging.ERROR)
logging.getLogger('azure.core').setLevel(logging.ERROR)
@total_ordering
class RichPath(ABC):
    """
    RichPath is an abstraction layer of local and remote paths allowing unified access
    of both local and remote files. Currently, only local and Azure blob paths are supported.
    To use Azure paths, if the current environment is set up you need no further action.
    See https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential for
    potential default configuration (az login, Service Principals, ManagedIdentity, etc.)
    Alternatively a .json configuration file needs to be passed in the
    `RichPath.create()` function. The file has the format:
    ```
    {
        "storage_account_name": {
            "sas_token": "<the SAS token>",
            "endpoint": "url if this is not a *.blob.core.windows.net endpoint"
            "cache_location": "optional location to cache blobs locally"
        }
        ...
    }
    ```
    or
    ```
    {
        "storage_account_name": {
            "connection_string" : "the_string",
            "endpoint": "url if this is not a *.blob.core.windows.net endpoint"
            "cache_location": "optional location to cache blobs locally"
        }
        ...
    }
    ```
    or
    ```
    {
        "storage_account_name": {
            "account_key": "the key"
            "endpoint": "url if this is not a *.blob.core.windows.net endpoint"
            "cache_location": "optional location to cache blobs locally"
        }
        ...
    }
    ```
    Where `storage_account_name` is the name of the storage account in the Azure portal.
    Multiple storage accounts can be placed in a single file. This allows to address blobs
    and "directories" as `azure://storage_account_name/container_name/path/to/blob`. The
    Azure SAS token can be retrieved from the Azure portal or from the Azure Storage Explorer.
    The `cache_location` may contain environment variables, e.g. `/path/to/${SOME_VAR}/dir`
    that will be replaced appropriately.
    If an external library requires a local path, you can ensure that a `RichPath`
    object represents a local (possibly cached) object by returning
    ```
    original_object.to_local_path().path
    ```
    which will download the remote object(s), if needed, and provide a local path.
    """
    def __init__(self, path: str):
        # Subclasses pass the already-normalized path string (no azure:// prefix).
        self.__path = path
    @property
    def path(self) -> str:
        """The raw path string this object wraps."""
        return self.__path
    @staticmethod
    def create(path: str, azure_info_path: Optional[str]=None) -> 'RichPath':
        """This creates a RichPath object based on the input path.
        To create a remote path, just prefix it appropriately and pass
        in the path to the .json configuration.
        """
        if path.startswith(AZURE_PATH_PREFIX):
            # Strip off the AZURE_PATH_PREFIX:
            path = path[len(AZURE_PATH_PREFIX):]
            # Remote paths have the shape account/container/blob-path.
            account_name, container_name, path = path.split('/', 2)
            if azure_info_path is not None:
                with open(azure_info_path, 'r') as azure_info_file:
                    azure_info = json.load(azure_info_file)
                azure_info = azure_info.get(account_name)
                if azure_info is None:
                    raise Exception("Could not find access information for account '%s'!" % (account_name,))
                account_endpoint = azure_info.get('endpoint', "https://%s.blob.core.windows.net/" % account_name)
                cache_location = azure_info.get('cache_location')
                connection_string = azure_info.get('connection_string')
                sas_token = azure_info.get('sas_token')
                account_key = azure_info.get('account_key')
                # Credential precedence: connection string, then SAS token, then account key.
                if connection_string is not None:
                    container_client = ContainerClient.from_connection_string(connection_string, container_name)
                elif sas_token is not None:
                    query_string: str = sas_token
                    # SAS tokens from the portal may or may not include the leading '?'.
                    if not query_string.startswith('?'):
                        query_string = '?' + query_string
                    container_client = ContainerClient.from_container_url(f"{account_endpoint}/{container_name}{query_string}")
                elif account_key is not None:
                    connection_string = f"AccountName={account_name};AccountKey={account_key};BlobEndpoint={account_endpoint};"
                    container_client = ContainerClient.from_connection_string(connection_string, container_name)
                else:
                    raise Exception("Access to Azure storage account '%s' with azure_info_path requires either account_key or sas_token!" % (
                        account_name,
                    ))
            else:
                # No config file: fall back to ambient credentials
                # (az login, service principal env vars, managed identity, ...).
                token_credential = DefaultAzureCredential()
                # This is the correct URI for all non-sovereign clouds or emulators.
                account_endpoint = "https://%s.blob.core.windows.net/" % account_name
                cache_location = None
                container_client = ContainerClient(
                    account_url=account_endpoint,
                    container_name=container_name,
                    credential=token_credential)
            # Replace environment variables in the cache location
            if cache_location is not None:
                def replace_by_env_var(m) -> str:
                    env_var_name = m.group(1)
                    env_var_value = os.environ.get(env_var_name)
                    if env_var_value is not None:
                        return env_var_value
                    else:
                        # Unset variables are left as the bare variable name.
                        return env_var_name
                cache_location = re.sub('\\${([^}]+)}', replace_by_env_var, cache_location)
            return AzurePath(path,
                             azure_container_client=container_client,
                             cache_location=cache_location)
        else:
            return LocalPath(path)
    def __ne__(self, other):
        return not (self == other)
    def __lt__(self, other):
        # @total_ordering derives the remaining comparisons from this and __eq__.
        return self.path < other.path
    @abstractmethod
    def is_dir(self) -> bool:
        """Return True if this path refers to a directory(-like) location."""
        pass
    @abstractmethod
    def is_file(self) -> bool:
        """Return True if this path refers to an existing file/blob."""
        pass
    @abstractmethod
    def make_as_dir(self) -> None:
        """Create this path as a directory (may be a no-op for blob stores)."""
        pass
    @abstractmethod
    def read_as_binary(self) -> bytes:
        """Read possibly compressed binary file."""
        pass
    @abstractmethod
    def save_as_compressed_file(self, data: Any) -> None:
        """Serialize `data` to this path; the format is chosen from the file suffix."""
        pass
    def read_as_text(self) -> str:
        # All text content is assumed to be UTF-8 encoded.
        return self.read_as_binary().decode('utf-8')
    def read_as_json(self) -> Any:
        # OrderedDict preserves the key order of the on-disk document.
        return json.loads(self.read_as_text(), object_pairs_hook=OrderedDict)
    def read_as_jsonl(self, error_handling: Optional[Callable[[str, Exception], None]]=None) -> Iterable[Any]:
        """
        Parse JSONL files. See http://jsonlines.org/ for more.
        :param error_handling: a callable that receives the original line and the exception object and takes
            over how parse error handling should happen.
        :return: a iterator of the parsed objects of each line.
        """
        for line in self.read_as_text().splitlines():
            try:
                yield json.loads(line, object_pairs_hook=OrderedDict)
            except Exception as e:
                if error_handling is None:
                    raise
                else:
                    error_handling(line, e)
    @abstractmethod
    def read_as_pickle(self) -> Any:
        """Deserialize this (possibly gzipped) path as a pickle."""
        pass
    @abstractmethod
    def read_as_msgpack_l(self) -> Iterable[Any]:
        """Iterate over the objects of a msgpack-lines file."""
        pass
    @abstractmethod
    def read_as_msgpack(self) -> Any:
        """Deserialize this path as a single msgpack object."""
        pass
    @abstractmethod
    def read_as_numpy(self) -> Any:
        """Load this path with numpy.load (.npy/.npz)."""
        pass
    def read_by_file_suffix(self) -> Any:
        """Dispatch to the appropriate read_* method based on the path's suffix.

        Note: .jsonl/.msgpack.l suffixes return lazy iterators, the rest
        return fully materialized objects.
        """
        if self.path.endswith('.json.gz') or self.path.endswith('.json'):
            return self.read_as_json()
        if self.path.endswith('.jsonl.gz') or self.path.endswith('.jsonl'):
            return self.read_as_jsonl()
        if self.path.endswith('.pkl.gz') or self.path.endswith('.pkl'):
            return self.read_as_pickle()
        if self.path.endswith('.msgpack.gz') or self.path.endswith('.msgpack'):
            return self.read_as_msgpack()
        if self.path.endswith('.msgpack.l.gz') or self.path.endswith('.msgpack.l'):
            return self.read_as_msgpack_l()
        if self.path.endswith('.npy') or self.path.endswith('.npz'):
            return self.read_as_numpy()
        raise ValueError('File suffix must be .json, .json.gz, .pkl, .pkl.gz, .npy or .npz: %s' % self.path)
    def get_filtered_files_in_dir(self, file_pattern: str) -> List['RichPath']:
        """Materialized version of iterate_filtered_files_in_dir."""
        return list(self.iterate_filtered_files_in_dir(file_pattern))
    @abstractmethod
    def iterate_filtered_files_in_dir(self, file_pattern: str) -> Iterable['RichPath']:
        """Yield the files under this directory matching the glob-style pattern."""
        pass
    @abstractmethod
    def join(self, filename: str) -> 'RichPath':
        """Return a new path for `filename` under this path."""
        pass
    @abstractmethod
    def basename(self) -> str:
        """Return the final component of the path."""
        pass
    @abstractmethod
    def get_size(self) -> int:
        """Return the size of the file in bytes."""
        pass
    @abstractmethod
    def exists(self) -> bool:
        """Return True if the file or directory exists."""
        pass
    @abstractmethod
    def to_local_path(self) -> 'LocalPath':
        """Return a local (possibly cached) version of this path, downloading if needed."""
        pass
    @abstractmethod
    def relpath(self, base: 'RichPath') -> str:
        """Return this path relative to `base` (same concrete type expected)."""
        pass
    @abstractmethod
    def delete(self, missing_ok: bool=True) -> None:
        """Delete the file; raise FileNotFoundError only when missing_ok is False."""
        pass
    def copy_from(self, source_path: 'RichPath', overwrite_ok: bool=True) -> None:
        """Copy a file or an entire directory tree from source_path into this path."""
        if source_path.is_dir():
            assert self.is_dir() or not self.exists(), 'Source path is a directory, but the target is a file.'
            # Recursively copy every file, preserving relative layout.
            for file in source_path.iterate_filtered_files_in_dir('*'):
                target_file_path = self.join(file.relpath(source_path))
                target_file_path.copy_from(file, overwrite_ok=overwrite_ok)
        else:
            if not overwrite_ok and self.exists():
                raise Exception('Overwriting file when copying.')
            self._copy_from_file(source_path)
    def _copy_from_file(self, from_file: 'RichPath') -> None:
        """Default implementation for copying a file into another. This converts the from_file to a local path
        and copies from there."""
        assert from_file.exists()
        self._copy_from_local_file(from_file.to_local_path())
    @abstractmethod
    def _copy_from_local_file(self, local_file: 'LocalPath') -> None:
        """Copy the contents of a local file into this path."""
        pass
class LocalPath(RichPath):
    """RichPath implementation for paths on the local filesystem."""
    def __init__(self, path: str):
        super().__init__(path)
    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.path == other.path
    def __hash__(self):
        return hash(self.path)
    def __repr__(self):
        return self.path
    def is_dir(self) -> bool:
        return os.path.isdir(self.path)
    def is_file(self) -> bool:
        return os.path.isfile(self.path)
    def make_as_dir(self):
        os.makedirs(self.path, exist_ok=True)
    def relpath(self, base: 'LocalPath') -> str:
        assert isinstance(base, LocalPath), 'base must also be a LocalPath'
        return os.path.relpath(self.path, base.path)
    def read_as_binary(self) -> bytes:
        # Transparently decompress gzipped content (detected by magic bytes,
        # not by suffix).
        if self.__is_gzipped(self.path):
            with gzip.open(self.path) as f:
                return f.read()
        else:
            with open(self.path, 'rb') as f:
                return f.read()
    def read_as_jsonl(self, error_handling: Optional[Callable[[str, Exception], None]]=None) -> Iterable[Any]:
        """
        Iterate through JSONL files. See http://jsonlines.org/ for more.
        :param error_handling: a callable that receives the original line and the exception object and takes
            over how parse error handling should happen.
        :return: a iterator of the parsed objects of each line.
        """
        if self.__is_gzipped(self.path):
            fh = gzip.open(self.path, mode='rt', encoding='utf-8')
        else:
            fh = open(self.path, 'rt', encoding='utf-8')
        try:
            for line in fh:
                try:
                    yield json.loads(line, object_pairs_hook=OrderedDict)
                except Exception as e:
                    if error_handling is None:
                        raise
                    else:
                        error_handling(line, e)
        finally:
            # Ensure the handle is closed even when the consumer abandons
            # the generator early.
            fh.close()
    def read_as_pickle(self) -> Any:
        if self.__is_gzipped(self.path):
            with gzip.open(self.path) as f:
                return pickle.load(f)
        else:
            with open(self.path, 'rb') as f:
                return pickle.load(f)
    def read_as_msgpack_l(self) -> Iterable[Any]:
        if self.__is_gzipped(self.path):
            with gzip.open(self.path) as f:
                unpacker = msgpack.Unpacker(f, raw=False, object_pairs_hook=OrderedDict)
                yield from unpacker
        else:
            with open(self.path, 'rb') as f:
                unpacker = msgpack.Unpacker(f, raw=False, object_pairs_hook=OrderedDict)
                yield from unpacker
    def read_as_msgpack(self) -> Any:
        if self.__is_gzipped(self.path):
            with gzip.open(self.path) as f:
                return msgpack.load(f)
        else:
            with open(self.path, 'rb') as f:
                return msgpack.load(f)
    def read_as_numpy(self) -> Any:
        # NOTE(review): for .npz archives numpy.load returns a lazily-read
        # NpzFile tied to this handle, which is closed on return — accessing
        # its arrays afterwards may fail. Consider numpy.load(self.path).
        with open(self.path, 'rb') as f:
            return numpy.load(f)
    @staticmethod
    def __is_gzipped(filename: str) -> bool:
        # 0x1f8b is the gzip magic number.
        with open(filename, 'rb') as f:
            return binascii.hexlify(f.read(2)) == b'1f8b'
    def save_as_compressed_file(self, data: Any) -> None:
        """Serialize `data` according to the path suffix (always gzip-compressed)."""
        if self.path.endswith('.json.gz'):
            save_json_gz(data, self.path)
        elif self.path.endswith('.jsonl.gz'):
            save_jsonl_gz(data, self.path)
        elif self.path.endswith('.pkl.gz'):
            with gzip.GzipFile(self.path, 'wb') as outfile:
                pickle.dump(data, outfile)
        elif self.path.endswith('.msgpack.gz'):
            with gzip.GzipFile(self.path, 'wb') as outfile:
                msgpack.dump(data, outfile)
        elif self.path.endswith('.msgpack.l.gz'):
            # msgpack-lines: one packed object after another in a single stream.
            with gzip.GzipFile(self.path, "wb") as out_file:
                packer = msgpack.Packer(use_bin_type=True)
                for element in data:
                    out_file.write(packer.pack(element))
        else:
            raise ValueError('File suffix must be `.json.gz`, `.pkl.gz`, `.msgpack.gz`, or `.msgpack.l.gz`. It was `%s`' % self.path)
    def iterate_filtered_files_in_dir(self, file_pattern: str) -> Iterable['LocalPath']:
        yield from (LocalPath(path)
                    for path in glob.iglob(os.path.join(self.path, file_pattern), recursive=True))
    def join(self, filename: str) -> 'LocalPath':
        return LocalPath(os.path.join(self.path, filename))
    def basename(self) -> str:
        return os.path.basename(self.path)
    def get_size(self) -> int:
        return os.stat(self.path).st_size
    def exists(self) -> bool:
        return os.path.exists(self.path)
    def delete(self, missing_ok: bool=True) -> None:
        try:
            os.unlink(self.path)
        except FileNotFoundError:
            if not missing_ok:
                raise
    def to_local_path(self) -> 'LocalPath':
        # Already local; nothing to download or cache.
        return self
    def _copy_from_local_file(self, local_file: 'LocalPath') -> None:
        os.makedirs(os.path.dirname(self.path), exist_ok=True)
        shutil.copy2(src=local_file.path, dst=self.path)
class AzurePath(RichPath):
    """RichPath implementation backed by a blob in an Azure storage container.

    When a ``cache_location`` is configured, downloaded blobs are cached on the
    local filesystem and revalidated against the blob's ETag before reuse.
    """
    def __init__(self, path: str, azure_container_client: ContainerClient,
                 cache_location: Optional[str]):
        super().__init__(path)
        self.__container_client = azure_container_client
        self.__blob_client = self.__container_client.get_blob_client(self.path)
        self.__cache_location = cache_location
    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.path == other.path
                and self.__blob_client == other.__blob_client)
    def __hash__(self):
        return hash(self.path)
    def __repr__(self):
        return "%s%s/%s/%s" % (AZURE_PATH_PREFIX, self.__container_client.account_name, self.__container_client.container_name, self.path)
    def is_dir(self) -> bool:
        """A path is a 'directory' if blobs exist under it but none is named exactly like it."""
        blob_list = self.__container_client.list_blobs(self.path)
        for blob in blob_list:
            if blob.name == self.path:
                # Listing this, yields the path itself, thus it's a file.
                return False
            return True
        else:
            return False  # This path does not exist, return False by convention, similar to os.path.isdir()
    def is_file(self) -> bool:
        return not self.is_dir() and self.exists()
    def relpath(self, base: 'AzurePath') -> str:
        assert isinstance(base, AzurePath)
        return os.path.relpath(self.path, base.path)
    def make_as_dir(self) -> None:
        # Note: Directories don't really exist in blob storage.
        # Instead filenames may contain / -- thus, we have nothing to do here
        pass
    def read_as_binary(self) -> bytes:
        if self.__cache_location is None:
            return self.__read_as_binary()
        cached_file_path = self.__cache_file_locally()
        return cached_file_path.read_as_binary()
    @property
    def __cached_file_path(self) -> str:
        """Local filesystem location where this blob is (or would be) cached."""
        return os.path.join(self.__cache_location, self.__container_client.container_name, self.path)
    def __cache_file_locally(self, num_retries: int=1) -> LocalPath:
        """Download the blob into the local cache (ETag-validated) and return its LocalPath."""
        cached_file_path = self.__cached_file_path
        cached_file_path_etag = cached_file_path+'.etag'  # Create an .etag file containing the object etag
        old_etag = None
        if os.path.exists(cached_file_path_etag):
            with open(cached_file_path_etag) as f:
                old_etag = f.read()
        try:
            os.makedirs(os.path.dirname(cached_file_path), exist_ok=True)
            # The next invocation to the blob service may fail and delete the current file. Store it elsewhere
            new_filepath = cached_file_path+'.new'
            if old_etag is not None:
                # Only re-download when the remote blob changed since we cached it.
                downloader = self.__blob_client.download_blob(etag=old_etag, match_condition=MatchConditions.IfModified)
            else:
                downloader = self.__blob_client.download_blob()
            with open(new_filepath, 'wb') as f:
                downloader.readinto(f)
            os.rename(new_filepath, cached_file_path)
            with open(cached_file_path_etag, 'w') as f:
                f.write(downloader.properties['etag'])
        except HttpResponseError as responseError:
            if responseError.status_code != 304:  # HTTP 304: Not Modified -> cached copy is current
                raise
        except Exception as e:
            # On failure, drop the (possibly partial) cached copy and retry once.
            if os.path.exists(cached_file_path):
                os.remove(cached_file_path)
                os.remove(cached_file_path_etag)
            if num_retries == 0:
                raise
            else:
                self.__cache_file_locally(num_retries-1)
        return LocalPath(cached_file_path)
    def __read_as_binary(self) -> bytes:
        """Download the blob into memory, transparently gunzipping gzipped content."""
        with io.BytesIO() as stream:
            self.__blob_client.download_blob().readinto(stream)
            stream.seek(0)
            # 0x1f8b is the gzip magic number; non-gzip content is returned as-is.
            if binascii.hexlify(stream.read(2)) != b'1f8b':
                stream.seek(0)
                return stream.read()
            stream.seek(0)
            # 32 + MAX_WBITS tells zlib to auto-detect the gzip header.
            decompressor = zlib.decompressobj(32 + zlib.MAX_WBITS)
            decompressed_data = decompressor.decompress(stream.read())
            return decompressed_data
    def read_as_pickle(self) -> Any:
        if self.__cache_location is None:
            return pickle.loads(self.read_as_binary())
        # This makes sure that we do not use a memory stream to store the temporary data:
        cached_file_path = self.__cache_file_locally()
        # We sometimes have a corrupted cache (if the process was killed while writing)
        try:
            data = cached_file_path.read_as_pickle()
        except EOFError:
            print("I: File '%s' corrupted in cache. Deleting and trying once more." % (self,))
            os.unlink(cached_file_path.path)
            cached_file_path = self.__cache_file_locally()
            data = cached_file_path.read_as_pickle()
        return data
    def read_as_msgpack_l(self) -> Iterable[Any]:
        if self.__cache_location is None:
            # Fix: Unpacker expects a file-like object, not raw bytes, and the
            # original fell through into the caching branch afterwards (which
            # crashes when there is no cache location).
            unpacker = msgpack.Unpacker(io.BytesIO(self.__read_as_binary()), raw=False, object_pairs_hook=OrderedDict)
            yield from unpacker
        else:
            # Fix: this method is a generator, so a plain `return gen` would
            # yield nothing; delegate with `yield from` instead.
            cached_file_path = self.__cache_file_locally()
            yield from cached_file_path.read_as_msgpack_l()
    def read_as_msgpack(self) -> Any:
        if self.__cache_location is None:
            # Fix: msgpack.load expects a stream; wrap the downloaded bytes.
            return msgpack.load(io.BytesIO(self.__read_as_binary()))
        cached_file_path = self.__cache_file_locally()
        return cached_file_path.read_as_msgpack()
    def read_as_numpy(self) -> Any:
        if self.__cache_location is None:
            # Fix: numpy.load expects a file-like object or path, not bytes.
            return numpy.load(io.BytesIO(self.read_as_binary()))
        # This makes sure that we do not use a memory stream to store the temporary data:
        cached_file_path = self.__cache_file_locally()
        # We sometimes have a corrupted cache (if the process was killed while writing)
        try:
            data = cached_file_path.read_as_numpy()
        except EOFError:
            print("I: File '%s' corrupted in cache. Deleting and trying once more." % (self,))
            os.unlink(cached_file_path.path)
            cached_file_path = self.__cache_file_locally()
            data = cached_file_path.read_as_numpy()
        return data
    def read_by_file_suffix(self) -> Any:
        # If we aren't caching, use the default implementation that redirects to specialised methods:
        if self.__cache_location is None:
            return super().read_by_file_suffix()
        # This makes sure that we do not use a memory stream to store the temporary data:
        cached_file_path = self.__cache_file_locally()
        # We sometimes have a corrupted cache (if the process was killed while writing)
        try:
            return cached_file_path.read_by_file_suffix()
        except EOFError:
            print("I: File '%s' corrupted in cache. Deleting and trying once more." % (self,))
            os.unlink(cached_file_path.path)
            cached_file_path = self.__cache_file_locally()
            return cached_file_path.read_by_file_suffix()
    def save_as_compressed_file(self, data: Any):
        """Serialize `data` to a local temp file (suffix-dependent format) and upload it."""
        # TODO: Python does not have a built-in "compress stream" functionality in its standard lib
        # Thus, we just write out to a file and upload, but of course, this should be better...
        if self.path.endswith('.json.gz'):
            f = tempfile.NamedTemporaryFile(suffix='.json.gz', delete=False)
        elif self.path.endswith('.jsonl.gz'):
            f = tempfile.NamedTemporaryFile(suffix='.jsonl.gz', delete=False)
        elif self.path.endswith('.pkl.gz'):
            f = tempfile.NamedTemporaryFile(suffix='.pkl.gz', delete=False)
        elif self.path.endswith('.msgpack.gz'):
            f = tempfile.NamedTemporaryFile(suffix='.msgpack.gz', delete=False)
        elif self.path.endswith('.msgpack.l.gz'):
            f = tempfile.NamedTemporaryFile(suffix='.msgpack.l.gz', delete=False)
        else:
            raise ValueError('File suffix must be .json.gz, .jsonl.gz, .msgpack.gz, .msgpack.l.gz, or .pkl.gz: %s' % self.path)
        try:
            local_temp_file = LocalPath(f.name)
            f.close()
            local_temp_file.save_as_compressed_file(data)
            with open(local_temp_file.path, 'rb') as local_fp:
                self.__blob_client.upload_blob(local_fp, overwrite=True)
        finally:
            os.unlink(local_temp_file.path)
    def iterate_filtered_files_in_dir(self, file_pattern: str) -> Iterable['AzurePath']:
        full_pattern = os.path.join(self.path, file_pattern)
        seen_at_least_one_in_dir = False
        for blob in self.__container_client.list_blobs(name_starts_with=self.path):
            seen_at_least_one_in_dir = True
            if fnmatch.fnmatch(blob.name, full_pattern):
                yield AzurePath(blob.name,
                                azure_container_client=self.__container_client,
                                cache_location=self.__cache_location)
        if not seen_at_least_one_in_dir:
            logging.warning("AzurePath is iterating over non-existent directory %s" % self)
    def join(self, filename: str) -> 'AzurePath':
        return AzurePath(os.path.join(self.path.rstrip('/'), filename),
                         azure_container_client=self.__container_client,
                         cache_location=self.__cache_location)
    def basename(self) -> str:
        return os.path.basename(self.path)
    def get_size(self) -> int:
        return self.__blob_client.get_blob_properties().size
    def exists(self) -> bool:
        if not self.is_dir():
            return self.__blob_client.exists()
        else:
            return True
    def delete(self, missing_ok: bool=True) -> None:
        if self.is_file():
            self.__blob_client.delete_blob()
            # Fix: only touch the local cache when one is configured, and
            # tolerate a blob that was never cached; also drop the stale .etag.
            if self.__cache_location is not None:
                for cached in (self.__cached_file_path, self.__cached_file_path + '.etag'):
                    try:
                        os.unlink(cached)
                    except FileNotFoundError:
                        pass
        elif not missing_ok:
            raise FileNotFoundError(self.path)
    def to_local_path(self) -> LocalPath:
        """Cache all files locally and return their local path."""
        assert self.__cache_location is not None, 'Cannot convert AzurePath to LocalPath when no cache location exists.'
        if self.is_dir():
            for file in self.iterate_filtered_files_in_dir('*'):
                file.to_local_path()
            return LocalPath(self.__cached_file_path)
        else:
            return self.__cache_file_locally()
    def _copy_from_file(self, from_file: RichPath) -> None:
        if not isinstance(from_file, AzurePath):
            # Default to copying the file locally first.
            super()._copy_from_file(from_file)
            return
        if not from_file.exists():
            raise FileNotFoundError(f"File {from_file} not found")
        # Server-side copy between blobs; poll until the copy finishes.
        self.__blob_client.start_copy_from_url(from_file.__blob_client.url)
        while self.__blob_client.get_blob_properties().copy.status == 'pending':
            time.sleep(.1)
        if self.__blob_client.get_blob_properties().copy.status != 'success':
            copy = self.__blob_client.get_blob_properties().copy
            raise Exception('Failed to copy between Azure blobs: %s %s' % (copy.status, copy.status_description))
    def _copy_from_local_file(self, local_file: LocalPath) -> None:
        with open(local_file.path, 'rb') as f:
            self.__container_client.get_blob_client(self.path).upload_blob(f, overwrite=True)
| [
"dpu_utils.utils.dataloading.save_json_gz",
"numpy.load",
"os.remove",
"pickle.dump",
"os.unlink",
"dpu_utils.utils.dataloading.save_jsonl_gz",
"os.path.isfile",
"pickle.load",
"azure.storage.blob.ContainerClient.from_container_url",
"azure.identity.DefaultAzureCredential",
"azure.storage.blob.C... | [((833, 872), 'logging.getLogger', 'logging.getLogger', (['"""azure.storage.blob"""'], {}), "('azure.storage.blob')\n", (850, 872), False, 'import logging\n'), ((898, 929), 'logging.getLogger', 'logging.getLogger', (['"""azure.core"""'], {}), "('azure.core')\n", (915, 929), False, 'import logging\n'), ((12008, 12032), 'os.path.isdir', 'os.path.isdir', (['self.path'], {}), '(self.path)\n', (12021, 12032), False, 'import os\n'), ((12083, 12108), 'os.path.isfile', 'os.path.isfile', (['self.path'], {}), '(self.path)\n', (12097, 12108), False, 'import os\n'), ((12148, 12185), 'os.makedirs', 'os.makedirs', (['self.path'], {'exist_ok': '(True)'}), '(self.path, exist_ok=True)\n', (12159, 12185), False, 'import os\n'), ((12331, 12368), 'os.path.relpath', 'os.path.relpath', (['self.path', 'base.path'], {}), '(self.path, base.path)\n', (12346, 12368), False, 'import os\n'), ((16266, 16293), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (16282, 16293), False, 'import os\n'), ((16420, 16445), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (16434, 16445), False, 'import os\n'), ((16854, 16902), 'shutil.copy2', 'shutil.copy2', ([], {'src': 'local_file.path', 'dst': 'self.path'}), '(src=local_file.path, dst=self.path)\n', (16866, 16902), False, 'import shutil\n'), ((18325, 18362), 'os.path.relpath', 'os.path.relpath', (['self.path', 'base.path'], {}), '(self.path, base.path)\n', (18340, 18362), False, 'import os\n'), ((18871, 18961), 'os.path.join', 'os.path.join', (['self.__cache_location', 'self.__container_client.container_name', 'self.path'], {}), '(self.__cache_location, self.__container_client.container_name,\n self.path)\n', (18883, 18961), False, 'import os\n'), ((19228, 19265), 'os.path.exists', 'os.path.exists', (['cached_file_path_etag'], {}), '(cached_file_path_etag)\n', (19242, 19265), False, 'import os\n'), ((25609, 25646), 'os.path.join', 'os.path.join', (['self.path', 
'file_pattern'], {}), '(self.path, file_pattern)\n', (25621, 25646), False, 'import os\n'), ((26522, 26549), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (26538, 26549), False, 'import os\n'), ((13138, 13187), 'gzip.open', 'gzip.open', (['self.path'], {'mode': '"""rt"""', 'encoding': '"""utf-8"""'}), "(self.path, mode='rt', encoding='utf-8')\n", (13147, 13187), False, 'import gzip\n'), ((14729, 14742), 'numpy.load', 'numpy.load', (['f'], {}), '(f)\n', (14739, 14742), False, 'import numpy\n'), ((15028, 15057), 'dpu_utils.utils.dataloading.save_json_gz', 'save_json_gz', (['data', 'self.path'], {}), '(data, self.path)\n', (15040, 15057), False, 'from dpu_utils.utils.dataloading import save_json_gz, save_jsonl_gz\n'), ((16181, 16214), 'os.path.join', 'os.path.join', (['self.path', 'filename'], {}), '(self.path, filename)\n', (16193, 16214), False, 'import os\n'), ((16344, 16362), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (16351, 16362), False, 'import os\n'), ((16529, 16549), 'os.unlink', 'os.unlink', (['self.path'], {}), '(self.path)\n', (16538, 16549), False, 'import os\n'), ((16802, 16828), 'os.path.dirname', 'os.path.dirname', (['self.path'], {}), '(self.path)\n', (16817, 16828), False, 'import os\n'), ((19961, 20002), 'os.rename', 'os.rename', (['new_filepath', 'cached_file_path'], {}), '(new_filepath, cached_file_path)\n', (19970, 20002), False, 'import os\n'), ((20733, 20745), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (20743, 20745), False, 'import io\n'), ((21037, 21076), 'zlib.decompressobj', 'zlib.decompressobj', (['(32 + zlib.MAX_WBITS)'], {}), '(32 + zlib.MAX_WBITS)\n', (21055, 21076), False, 'import zlib\n'), ((24428, 24488), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".json.gz"""', 'delete': '(False)'}), "(suffix='.json.gz', delete=False)\n", (24455, 24488), False, 'import tempfile\n'), ((25461, 25492), 'os.unlink', 'os.unlink', (['local_temp_file.path'], {}), 
'(local_temp_file.path)\n', (25470, 25492), False, 'import os\n'), ((25837, 25877), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['blob.name', 'full_pattern'], {}), '(blob.name, full_pattern)\n', (25852, 25877), False, 'import fnmatch\n'), ((26128, 26207), 'logging.warning', 'logging.warning', (["('AzurePath is iterating over non-existent directory %s' % self)"], {}), "('AzurePath is iterating over non-existent directory %s' % self)\n", (26143, 26207), False, 'import logging\n'), ((26941, 26975), 'os.unlink', 'os.unlink', (['self.__cached_file_path'], {}), '(self.__cached_file_path)\n', (26950, 26975), False, 'import os\n'), ((28055, 28070), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (28065, 28070), False, 'import time\n'), ((5821, 5845), 'azure.identity.DefaultAzureCredential', 'DefaultAzureCredential', ([], {}), '()\n', (5843, 5845), False, 'from azure.identity import DefaultAzureCredential\n'), ((6096, 6205), 'azure.storage.blob.ContainerClient', 'ContainerClient', ([], {'account_url': 'account_endpoint', 'container_name': 'container_name', 'credential': 'token_credential'}), '(account_url=account_endpoint, container_name=container_name,\n credential=token_credential)\n', (6111, 6205), False, 'from azure.storage.blob import ContainerClient\n'), ((6746, 6804), 're.sub', 're.sub', (['"""\\\\${([^}]+)}"""', 'replace_by_env_var', 'cache_location'], {}), "('\\\\${([^}]+)}', replace_by_env_var, cache_location)\n", (6752, 6804), False, 'import re\n'), ((12471, 12491), 'gzip.open', 'gzip.open', (['self.path'], {}), '(self.path)\n', (12480, 12491), False, 'import gzip\n'), ((13738, 13758), 'gzip.open', 'gzip.open', (['self.path'], {}), '(self.path)\n', (13747, 13758), False, 'import gzip\n'), ((13789, 13803), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (13800, 13803), False, 'import pickle\n'), ((13889, 13903), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (13900, 13903), False, 'import pickle\n'), ((14017, 14037), 'gzip.open', 'gzip.open', 
(['self.path'], {}), '(self.path)\n', (14026, 14037), False, 'import gzip\n'), ((14072, 14133), 'msgpack.Unpacker', 'msgpack.Unpacker', (['f'], {'raw': '(False)', 'object_pairs_hook': 'OrderedDict'}), '(f, raw=False, object_pairs_hook=OrderedDict)\n', (14088, 14133), False, 'import msgpack\n'), ((14260, 14321), 'msgpack.Unpacker', 'msgpack.Unpacker', (['f'], {'raw': '(False)', 'object_pairs_hook': 'OrderedDict'}), '(f, raw=False, object_pairs_hook=OrderedDict)\n', (14276, 14321), False, 'import msgpack\n'), ((14460, 14480), 'gzip.open', 'gzip.open', (['self.path'], {}), '(self.path)\n', (14469, 14480), False, 'import gzip\n'), ((14511, 14526), 'msgpack.load', 'msgpack.load', (['f'], {}), '(f)\n', (14523, 14526), False, 'import msgpack\n'), ((14612, 14627), 'msgpack.load', 'msgpack.load', (['f'], {}), '(f)\n', (14624, 14627), False, 'import msgpack\n'), ((15118, 15148), 'dpu_utils.utils.dataloading.save_jsonl_gz', 'save_jsonl_gz', (['data', 'self.path'], {}), '(data, self.path)\n', (15131, 15148), False, 'from dpu_utils.utils.dataloading import save_json_gz, save_jsonl_gz\n'), ((19397, 19430), 'os.path.dirname', 'os.path.dirname', (['cached_file_path'], {}), '(cached_file_path)\n', (19412, 19430), False, 'import os\n'), ((20315, 20347), 'os.path.exists', 'os.path.exists', (['cached_file_path'], {}), '(cached_file_path)\n', (20329, 20347), False, 'import os\n'), ((21767, 21799), 'os.unlink', 'os.unlink', (['cached_file_path.path'], {}), '(cached_file_path.path)\n', (21776, 21799), False, 'import os\n'), ((23110, 23142), 'os.unlink', 'os.unlink', (['cached_file_path.path'], {}), '(cached_file_path.path)\n', (23119, 23142), False, 'import os\n'), ((23964, 23996), 'os.unlink', 'os.unlink', (['cached_file_path.path'], {}), '(cached_file_path.path)\n', (23973, 23996), False, 'import os\n'), ((24553, 24614), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".jsonl.gz"""', 'delete': '(False)'}), "(suffix='.jsonl.gz', delete=False)\n", (24580, 
24614), False, 'import tempfile\n'), ((4125, 4151), 'json.load', 'json.load', (['azure_info_file'], {}), '(azure_info_file)\n', (4134, 4151), False, 'import json\n'), ((4829, 4902), 'azure.storage.blob.ContainerClient.from_connection_string', 'ContainerClient.from_connection_string', (['connection_string', 'container_name'], {}), '(connection_string, container_name)\n', (4867, 4902), False, 'from azure.storage.blob import ContainerClient\n'), ((6514, 6542), 'os.environ.get', 'os.environ.get', (['env_var_name'], {}), '(env_var_name)\n', (6528, 6542), False, 'import os\n'), ((8347, 8394), 'json.loads', 'json.loads', (['line'], {'object_pairs_hook': 'OrderedDict'}), '(line, object_pairs_hook=OrderedDict)\n', (8357, 8394), False, 'import json\n'), ((20366, 20393), 'os.remove', 'os.remove', (['cached_file_path'], {}), '(cached_file_path)\n', (20375, 20393), False, 'import os\n'), ((20464, 20496), 'os.remove', 'os.remove', (['cached_file_path_etag'], {}), '(cached_file_path_etag)\n', (20473, 20496), False, 'import os\n'), ((24677, 24736), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pkl.gz"""', 'delete': '(False)'}), "(suffix='.pkl.gz', delete=False)\n", (24704, 24736), False, 'import tempfile\n'), ((5156, 5249), 'azure.storage.blob.ContainerClient.from_container_url', 'ContainerClient.from_container_url', (['f"""{account_endpoint}/{container_name}{query_string}"""'], {}), "(\n f'{account_endpoint}/{container_name}{query_string}')\n", (5190, 5249), False, 'from azure.storage.blob import ContainerClient\n'), ((13353, 13400), 'json.loads', 'json.loads', (['line'], {'object_pairs_hook': 'OrderedDict'}), '(line, object_pairs_hook=OrderedDict)\n', (13363, 13400), False, 'import json\n'), ((15212, 15242), 'gzip.GzipFile', 'gzip.GzipFile', (['self.path', '"""wb"""'], {}), "(self.path, 'wb')\n", (15225, 15242), False, 'import gzip\n'), ((15272, 15298), 'pickle.dump', 'pickle.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (15283, 
15298), False, 'import pickle\n'), ((16046, 16083), 'os.path.join', 'os.path.join', (['self.path', 'file_pattern'], {}), '(self.path, file_pattern)\n', (16058, 16083), False, 'import os\n'), ((24803, 24866), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".msgpack.gz"""', 'delete': '(False)'}), "(suffix='.msgpack.gz', delete=False)\n", (24830, 24866), False, 'import tempfile\n'), ((5461, 5534), 'azure.storage.blob.ContainerClient.from_connection_string', 'ContainerClient.from_connection_string', (['connection_string', 'container_name'], {}), '(connection_string, container_name)\n', (5499, 5534), False, 'from azure.storage.blob import ContainerClient\n'), ((15366, 15396), 'gzip.GzipFile', 'gzip.GzipFile', (['self.path', '"""wb"""'], {}), "(self.path, 'wb')\n", (15379, 15396), False, 'import gzip\n'), ((15426, 15453), 'msgpack.dump', 'msgpack.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (15438, 15453), False, 'import msgpack\n'), ((24935, 25000), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".msgpack.l.gz"""', 'delete': '(False)'}), "(suffix='.msgpack.l.gz', delete=False)\n", (24962, 25000), False, 'import tempfile\n'), ((15523, 15553), 'gzip.GzipFile', 'gzip.GzipFile', (['self.path', '"""wb"""'], {}), "(self.path, 'wb')\n", (15536, 15553), False, 'import gzip\n'), ((15593, 15626), 'msgpack.Packer', 'msgpack.Packer', ([], {'use_bin_type': '(True)'}), '(use_bin_type=True)\n', (15607, 15626), False, 'import msgpack\n')] |
import numpy as np
import itertools
import time
import argparse
import torch
from torch.autograd import Variable
from torch.autograd import grad as torchgrad
import torch.nn.functional as F
from utils.ais import ais_trajectory
from utils.simulate import simulate_data
from utils.hparams import HParams
from utils.math_ops import sigmoidial_schedule
from utils.helper import get_model
parser = argparse.ArgumentParser(description='bidirectional_mc')
# action configuration flags
parser.add_argument('--n-ais-iwae', '-nai', type=int, default=100,
help='number of IMPORTANCE samples for AIS evaluation (default: 100). \
This is different from MC samples.')
parser.add_argument('--n-ais-dist', '-nad', type=int, default=10000,
help='number of distributions for AIS evaluation (default: 10000)')
parser.add_argument('--no-cuda', '-nc', action='store_true', help='force not use CUDA')
# model configuration flags
parser.add_argument('--z-size', '-zs', type=int, default=50,
help='dimensionality of latent code (default: 50)')
parser.add_argument('--batch-size', '-bs', type=int, default=100,
help='batch size (default: 100)')
parser.add_argument('--n-batch', '-nb', type=int, default=10,
help='total number of batches (default: 10)')
parser.add_argument('--eval-path', '-ep', type=str, default='model.pth',
help='path to load evaluation ckpt (default: model.pth)')
parser.add_argument('--dataset', '-d', type=str, default='mnist', choices=['mnist', 'fashion', 'cifar'],
help='dataset to train and evaluate on (default: mnist)')
parser.add_argument('--wide-encoder', '-we', action='store_true',
help='use wider layer (more hidden units for FC, more channels for CIFAR)')
parser.add_argument('--has-flow', '-hf', action='store_true',
help='use flow for training and eval')
parser.add_argument('--hamiltonian-flow', '-hamil-f', action='store_true')
parser.add_argument('--n-flows', '-nf', type=int, default=2, help='number of flows')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
def get_default_hparams():
    """Build the HParams object for model construction from the parsed CLI flags."""
    hparam_values = dict(
        z_size=args.z_size,
        act_func=F.elu,
        has_flow=args.has_flow,
        hamiltonian_flow=args.hamiltonian_flow,
        n_flows=args.n_flows,
        wide_encoder=args.wide_encoder,
        cuda=args.cuda,
    )
    return HParams(**hparam_values)
def bdmc(model, loader, forward_schedule=np.linspace(0., 1., 500), n_sample=100):
    """Bidirectional Monte Carlo. Integrate forward and backward AIS.

    The backward schedule is the reverse of the forward.

    Args:
        model (vae.VAE): VAE model
        loader (iterator): iterator to loop over pairs of Variables; the first
            entry being `x`, the second being `z` sampled from the true
            posterior `p(z|x)`
        forward_schedule (list or numpy.ndarray): forward temperature schedule;
            backward schedule is used as its reverse
        n_sample: number of importance (not simple MC) sample

    Returns:
        Two lists for forward and backward bounds on batchs of data
    """
    # The iterator is exhaustible in py3, so duplicate it: one copy for the
    # forward chain and one for the backward chain.
    load, load_ = itertools.tee(loader, 2)

    # Forward chain.
    forward_logws = ais_trajectory(
        model, load,
        mode='forward', schedule=forward_schedule,
        n_sample=n_sample
    )

    # Backward chain: run AIS with the reversed temperature schedule.
    backward_schedule = np.flip(forward_schedule, axis=0)
    backward_logws = ais_trajectory(
        model, load_,
        mode='backward',
        schedule=backward_schedule,
        n_sample=n_sample
    )

    # Forward AIS yields a stochastic lower bound per batch, backward AIS a
    # stochastic upper bound; average each batch's log-weights.
    lower_bounds = [forward.mean() for forward in forward_logws]
    upper_bounds = [backward.mean() for backward in backward_logws]

    print('Average bounds on simulated data: lower %.4f, upper %.4f'
          % (np.mean(lower_bounds), np.mean(upper_bounds)))

    return forward_logws, backward_logws
def main():
    """Load the checkpointed model, simulate data from it, and run BDMC."""
    # Sanity check: evaluate BDMC on data simulated from the model itself.
    model = get_model(args.dataset, get_default_hparams())
    checkpoint = torch.load(args.eval_path)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()

    simulated_loader = simulate_data(model, batch_size=args.batch_size, n_batch=args.n_batch)
    temperature_schedule = sigmoidial_schedule(args.n_ais_dist)
    bdmc(model, simulated_loader, forward_schedule=temperature_schedule, n_sample=args.n_ais_iwae)
# Script entry point: run the BDMC evaluation when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.flip",
"argparse.ArgumentParser",
"utils.ais.ais_trajectory",
"torch.load",
"utils.simulate.simulate_data",
"numpy.mean",
"torch.cuda.is_available",
"utils.math_ops.sigmoidial_schedule",
"numpy.linspace",
"itertools.tee",
"utils.hparams.HParams"
] | [((397, 452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""bidirectional_mc"""'}), "(description='bidirectional_mc')\n", (420, 452), False, 'import argparse\n'), ((2198, 2223), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2221, 2223), False, 'import torch\n'), ((2264, 2449), 'utils.hparams.HParams', 'HParams', ([], {'z_size': 'args.z_size', 'act_func': 'F.elu', 'has_flow': 'args.has_flow', 'hamiltonian_flow': 'args.hamiltonian_flow', 'n_flows': 'args.n_flows', 'wide_encoder': 'args.wide_encoder', 'cuda': 'args.cuda'}), '(z_size=args.z_size, act_func=F.elu, has_flow=args.has_flow,\n hamiltonian_flow=args.hamiltonian_flow, n_flows=args.n_flows,\n wide_encoder=args.wide_encoder, cuda=args.cuda)\n', (2271, 2449), False, 'from utils.hparams import HParams\n'), ((2548, 2574), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(500)'], {}), '(0.0, 1.0, 500)\n', (2559, 2574), True, 'import numpy as np\n'), ((3305, 3329), 'itertools.tee', 'itertools.tee', (['loader', '(2)'], {}), '(loader, 2)\n', (3318, 3329), False, 'import itertools\n'), ((3371, 3464), 'utils.ais.ais_trajectory', 'ais_trajectory', (['model', 'load'], {'mode': '"""forward"""', 'schedule': 'forward_schedule', 'n_sample': 'n_sample'}), "(model, load, mode='forward', schedule=forward_schedule,\n n_sample=n_sample)\n", (3385, 3464), False, 'from utils.ais import ais_trajectory\n'), ((3537, 3570), 'numpy.flip', 'np.flip', (['forward_schedule'], {'axis': '(0)'}), '(forward_schedule, axis=0)\n', (3544, 3570), True, 'import numpy as np\n'), ((3592, 3688), 'utils.ais.ais_trajectory', 'ais_trajectory', (['model', 'load_'], {'mode': '"""backward"""', 'schedule': 'backward_schedule', 'n_sample': 'n_sample'}), "(model, load_, mode='backward', schedule=backward_schedule,\n n_sample=n_sample)\n", (3606, 3688), False, 'from utils.ais import ais_trajectory\n'), ((3959, 3980), 'numpy.mean', 'np.mean', (['upper_bounds'], {}), '(upper_bounds)\n', (3966, 
3980), True, 'import numpy as np\n'), ((4000, 4021), 'numpy.mean', 'np.mean', (['lower_bounds'], {}), '(lower_bounds)\n', (4007, 4021), True, 'import numpy as np\n'), ((4358, 4428), 'utils.simulate.simulate_data', 'simulate_data', (['model'], {'batch_size': 'args.batch_size', 'n_batch': 'args.n_batch'}), '(model, batch_size=args.batch_size, n_batch=args.n_batch)\n', (4371, 4428), False, 'from utils.simulate import simulate_data\n'), ((4444, 4480), 'utils.math_ops.sigmoidial_schedule', 'sigmoidial_schedule', (['args.n_ais_dist'], {}), '(args.n_ais_dist)\n', (4463, 4480), False, 'from utils.math_ops import sigmoidial_schedule\n'), ((4285, 4311), 'torch.load', 'torch.load', (['args.eval_path'], {}), '(args.eval_path)\n', (4295, 4311), False, 'import torch\n')] |
import argparse
import gzip
import logging
import multiprocessing
import os
import pathlib
import traceback
from itertools import repeat
import numpy as np
from CONFIG.FOLDER_STRUCTURE import TARGET_DB_NAME, ATOMS, SEQUENCES, STRUCTURE_FILES_PATH, SEQ_ATOMS_DATASET_PATH, MMSEQS_DATABASES_PATH
from CONFIG.RUNTIME_PARAMETERS import CPU_COUNT, MAX_CHAIN_LENGTH
from utils.bio_utils import PROTEIN_LETTERS, STRUCTURE_FILES_PATTERNS
from CPP_lib.libAtomDistanceIO import save_atoms
from CPP_lib.libAtomDistanceIO import initialize as initialize_CPP_LIB
from utils.structure_files_parsers.parse_mmcif import parse_mmcif
from utils.structure_files_parsers.parse_pdb import parse_pdb
from utils.mmseqs_utils import mmseqs_createdb
from utils.mmseqs_utils import mmseqs_createindex
from utils.utils import create_unix_time_folder
def parse_args():
    """Build and parse the command-line arguments for this preprocessing script."""
    description = ("Read structure files from -i to extract sequence and atom positions. "
                   "Save them in -o as .faa and .bin files. "
                   "Create and index new MMSEQS2 database in -db")
    arg_parser = argparse.ArgumentParser(description=description)
    arg_parser.add_argument("-i", "--input", required=False, default=STRUCTURE_FILES_PATH,
                            help="Path to folder with structure files")
    arg_parser.add_argument("-o", "--output", required=False, default=SEQ_ATOMS_DATASET_PATH,
                            help="Path to folder with sequences and atom positions")
    arg_parser.add_argument("-db", "--database", required=False, default=MMSEQS_DATABASES_PATH,
                            help="Path to create new MMSEQS2 database")
    arg_parser.add_argument("--overwrite", action="store_true",
                            help="Flag to override existing sequence and atom positions")
    return arg_parser.parse_args()
def parse_structure_file(structure_file, save_path):
    """Extract the amino acid sequence and atom positions from one structure file.

    Supports .pdb, .pdb.gz, .cif and .cif.gz files. Writes the sequence to
    save_path/SEQUENCES/<id>.faa and the atom positions to
    save_path/ATOMS/<id>.bin, where <id> is the file name with its structure
    extension stripped — matching the IDs compared by main()'s --overwrite
    deduplication. Chains shorter than 9 residues are skipped; chains longer
    than MAX_CHAIN_LENGTH are truncated. Errors are logged and the partial
    outputs removed; nothing is raised to the caller (the multiprocessing pool).
    """
    file_id = structure_file.name
    try:
        if file_id.endswith('.pdb'):
            file_id = file_id[:-len('.pdb')]
            with open(structure_file, 'r') as f:
                atom_amino_group, positions, groups = parse_pdb(f)
        elif file_id.endswith('.pdb.gz'):
            file_id = file_id[:-len('.pdb.gz')]
            with gzip.open(structure_file, 'rt') as f:
                atom_amino_group, positions, groups = parse_pdb(f)
        elif file_id.endswith('.cif'):
            file_id = file_id[:-len('.cif')]
            with open(structure_file, 'r') as f:
                atom_amino_group, positions, groups = parse_mmcif(f)
        elif file_id.endswith('.cif.gz'):
            file_id = file_id[:-len('.cif.gz')]
            with gzip.open(structure_file, 'rt') as f:
                atom_amino_group, positions, groups = parse_mmcif(f)
        else:
            print("Unsupported file format of file " + str(structure_file))
            return
    except Exception:
        print("EXCEPTION WHILE READING FILE ", str(structure_file))
        logging.error(traceback.format_exc())
        return

    # Build output paths from the extension-stripped ID. Previously these were
    # built from the full file name, so the --overwrite deduplication in
    # main() (which compares extension-stripped IDs) never matched anything.
    sequence_path = pathlib.Path(save_path) / SEQUENCES / (file_id + ".faa")
    atoms_path = pathlib.Path(save_path) / ATOMS / (file_id + ".bin")

    try:
        # Keep the index of the first atom of each residue group, in file order.
        _, groups = np.unique(groups, return_index=True)
        groups.sort()

        if len(groups) < 9:
            # Chains shorter than 9 amino acids might be corrupted or contain DNA.
            return
        if len(groups) > MAX_CHAIN_LENGTH:
            # Truncate long chains; see CONFIG/RUNTIME_PARAMETERS.py:MAX_CHAIN_LENGTH.
            # Rerun with --overwrite after changing it to re-process such files.
            group_indexes = np.append(groups[:MAX_CHAIN_LENGTH], groups[MAX_CHAIN_LENGTH]).astype(np.int32)
        else:
            # Append the total atom count as the end sentinel of the last group.
            group_indexes = np.append(groups, positions.shape[0]).astype(np.int32)

        sequence = ''.join(PROTEIN_LETTERS[atom_amino_group[i]] for i in group_indexes[:-1])
        with open(sequence_path, "w") as f:
            f.write(">" + file_id + "\n" + sequence + "\n")
        save_atoms(positions, group_indexes, str(atoms_path))
    except Exception:
        print("EXCEPTION DURING FILE PROCESSING ", str(structure_file))
        logging.error(traceback.format_exc())
        # Remove partial outputs so a failed file is not mistaken for a processed one.
        sequence_path.unlink(missing_ok=True)
        atoms_path.unlink(missing_ok=True)
        return
def main(input_path, atoms_path, db_path, overwrite):
    """Scan input_path for structure files, extract sequences/atoms in parallel,
    then build and index a new MMseqs2 database from the merged sequences.

    Args:
        input_path: pathlib.Path searched recursively for structure files.
        atoms_path: pathlib.Path where SEQUENCES/ and ATOMS/ outputs are written.
        db_path: pathlib.Path under which a new timestamped MMseqs2 DB folder is created.
        overwrite: when False, files whose IDs already have .bin outputs are skipped.
    """
    atoms_path.mkdir(exist_ok=True, parents=True)
    save_path = str(atoms_path.absolute())
    print("Sequences and Atoms positions will be stored in: ", save_path)
    (atoms_path / SEQUENCES).mkdir(exist_ok=True)
    (atoms_path / ATOMS).mkdir(exist_ok=True)
    print("Searching for structure files in: ", input_path)
    # Map extension-stripped file ID -> file path; later patterns overwrite
    # earlier ones when the same ID appears with multiple extensions.
    structure_files = dict()
    for pattern in STRUCTURE_FILES_PATTERNS:
        pattern_structure_files = list(input_path.glob("**/*" + pattern))
        pattern_structure_ids = [x.name[:-len(pattern)] for x in pattern_structure_files]
        if len(pattern_structure_files) == 0:
            continue
        print("Found", len(pattern_structure_files), pattern, "files")
        structure_files.update(zip(pattern_structure_ids, pattern_structure_files))
    if len(structure_files) == 0:
        print("No structure files found")
        return
    if not overwrite:
        # Skip IDs that already have a .bin output (strip the 4-char ".bin" suffix).
        existing_structures = set([x.name[:-4] for x in (atoms_path / ATOMS).glob("**/*.bin")])
        print("Found ", len(existing_structures), " already processed structures")
        duplicated_ids_counter = 0
        for id in list(structure_files.keys()):
            if id in existing_structures:
                structure_files.pop(id)
                duplicated_ids_counter += 1
        print("Found ", duplicated_ids_counter, " duplicated IDs")
    # Initialize the C++ atom-distance library before forking worker processes.
    initialize_CPP_LIB()
    print("Processing", len(structure_files), "files")
    with multiprocessing.Pool(processes=CPU_COUNT) as p:
        p.starmap(parse_structure_file, zip(structure_files.values(), repeat(save_path)))
    print("Merging sequence files for mmseqs2")
    # Shell-concatenate all per-file FASTA outputs into one file for createdb.
    os.system(f"cat {atoms_path / SEQUENCES}/* > {atoms_path / 'merged_sequences.faa'}")
    mmseqs2_path = create_unix_time_folder(db_path)
    print("Creating new mmseqs2 database " + str(mmseqs2_path))
    mmseqs_createdb(atoms_path / 'merged_sequences.faa', mmseqs2_path / TARGET_DB_NAME)
    print("Indexing new mmseqs2 database " + str(mmseqs2_path))
    mmseqs_createindex(mmseqs2_path / TARGET_DB_NAME)
# Script entry point: parse CLI flags and run the preprocessing pipeline.
if __name__ == '__main__':
    args = parse_args()
    input_path = pathlib.Path(args.input)
    output_path = pathlib.Path(args.output)
    db_path = pathlib.Path(args.database)
    overwrite = args.overwrite
    main(input_path, output_path, db_path, overwrite)
| [
"itertools.repeat",
"utils.mmseqs_utils.mmseqs_createdb",
"utils.utils.create_unix_time_folder",
"CPP_lib.libAtomDistanceIO.initialize",
"argparse.ArgumentParser",
"utils.mmseqs_utils.mmseqs_createindex",
"gzip.open",
"utils.structure_files_parsers.parse_mmcif.parse_mmcif",
"utils.structure_files_pa... | [((859, 1061), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Read structure files from -i to extract sequence and atom positions. Save them in -o as .faa and .bin files. Create and index new MMSEQS2 database in -db"""'}), "(description=\n 'Read structure files from -i to extract sequence and atom positions. Save them in -o as .faa and .bin files. Create and index new MMSEQS2 database in -db'\n )\n", (882, 1061), False, 'import argparse\n'), ((5811, 5831), 'CPP_lib.libAtomDistanceIO.initialize', 'initialize_CPP_LIB', ([], {}), '()\n', (5829, 5831), True, 'from CPP_lib.libAtomDistanceIO import initialize as initialize_CPP_LIB\n'), ((6087, 6176), 'os.system', 'os.system', (['f"""cat {atoms_path / SEQUENCES}/* > {atoms_path / \'merged_sequences.faa\'}"""'], {}), '(\n f"cat {atoms_path / SEQUENCES}/* > {atoms_path / \'merged_sequences.faa\'}")\n', (6096, 6176), False, 'import os\n'), ((6192, 6224), 'utils.utils.create_unix_time_folder', 'create_unix_time_folder', (['db_path'], {}), '(db_path)\n', (6215, 6224), False, 'from utils.utils import create_unix_time_folder\n'), ((6293, 6380), 'utils.mmseqs_utils.mmseqs_createdb', 'mmseqs_createdb', (["(atoms_path / 'merged_sequences.faa')", '(mmseqs2_path / TARGET_DB_NAME)'], {}), "(atoms_path / 'merged_sequences.faa', mmseqs2_path /\n TARGET_DB_NAME)\n", (6308, 6380), False, 'from utils.mmseqs_utils import mmseqs_createdb\n'), ((6445, 6494), 'utils.mmseqs_utils.mmseqs_createindex', 'mmseqs_createindex', (['(mmseqs2_path / TARGET_DB_NAME)'], {}), '(mmseqs2_path / TARGET_DB_NAME)\n', (6463, 6494), False, 'from utils.mmseqs_utils import mmseqs_createindex\n'), ((6566, 6590), 'pathlib.Path', 'pathlib.Path', (['args.input'], {}), '(args.input)\n', (6578, 6590), False, 'import pathlib\n'), ((6609, 6634), 'pathlib.Path', 'pathlib.Path', (['args.output'], {}), '(args.output)\n', (6621, 6634), False, 'import pathlib\n'), ((6649, 6676), 'pathlib.Path', 'pathlib.Path', 
(['args.database'], {}), '(args.database)\n', (6661, 6676), False, 'import pathlib\n'), ((3107, 3143), 'numpy.unique', 'np.unique', (['groups'], {'return_index': '(True)'}), '(groups, return_index=True)\n', (3116, 3143), True, 'import numpy as np\n'), ((5896, 5937), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'CPU_COUNT'}), '(processes=CPU_COUNT)\n', (5916, 5937), False, 'import multiprocessing\n'), ((1833, 1856), 'pathlib.Path', 'pathlib.Path', (['save_path'], {}), '(save_path)\n', (1845, 1856), False, 'import pathlib\n'), ((1907, 1930), 'pathlib.Path', 'pathlib.Path', (['save_path'], {}), '(save_path)\n', (1919, 1930), False, 'import pathlib\n'), ((2160, 2172), 'utils.structure_files_parsers.parse_pdb.parse_pdb', 'parse_pdb', (['f'], {}), '(f)\n', (2169, 2172), False, 'from utils.structure_files_parsers.parse_pdb import parse_pdb\n'), ((3038, 3060), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3058, 3060), False, 'import traceback\n'), ((4268, 4290), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4288, 4290), False, 'import traceback\n'), ((6014, 6031), 'itertools.repeat', 'repeat', (['save_path'], {}), '(save_path)\n', (6020, 6031), False, 'from itertools import repeat\n'), ((2285, 2316), 'gzip.open', 'gzip.open', (['structure_file', '"""rt"""'], {}), "(structure_file, 'rt')\n", (2294, 2316), False, 'import gzip\n'), ((2377, 2389), 'utils.structure_files_parsers.parse_pdb.parse_pdb', 'parse_pdb', (['f'], {}), '(f)\n', (2386, 2389), False, 'from utils.structure_files_parsers.parse_pdb import parse_pdb\n'), ((3724, 3774), 'numpy.append', 'np.append', (['group_indexes', 'groups[MAX_CHAIN_LENGTH]'], {}), '(group_indexes, groups[MAX_CHAIN_LENGTH])\n', (3733, 3774), True, 'import numpy as np\n'), ((3834, 3871), 'numpy.append', 'np.append', (['groups', 'positions.shape[0]'], {}), '(groups, positions.shape[0])\n', (3843, 3871), True, 'import numpy as np\n'), ((2582, 2596), 
'utils.structure_files_parsers.parse_mmcif.parse_mmcif', 'parse_mmcif', (['f'], {}), '(f)\n', (2593, 2596), False, 'from utils.structure_files_parsers.parse_mmcif import parse_mmcif\n'), ((2709, 2740), 'gzip.open', 'gzip.open', (['structure_file', '"""rt"""'], {}), "(structure_file, 'rt')\n", (2718, 2740), False, 'import gzip\n'), ((2801, 2815), 'utils.structure_files_parsers.parse_mmcif.parse_mmcif', 'parse_mmcif', (['f'], {}), '(f)\n', (2812, 2815), False, 'from utils.structure_files_parsers.parse_mmcif import parse_mmcif\n')] |
"""Experiment to quantify the correlation between the streamline
distance (MAM or MDF) against the Euclidean distance on the
corresponding dissimilarity representation embedding of the
streamlines.
"""
import numpy as np
import nibabel as nib
from euclidean_embeddings import dissimilarity
from functools import partial
from dipy.tracking.distances import (bundles_distances_mdf,
bundles_distances_mam)
from dipy.tracking.streamline import set_number_of_points
from euclidean_embeddings.subsampling import compute_subset
import os
def experiment(filename_idx, embedding, k, distance_function, nb_points, distance_threshold):
    """Quantify the correlation between a streamline distance (MAM or MDF) and
    the Euclidean distance in an embedding of the streamlines.

    Loads a tractogram, queries neighbors of randomly chosen streamlines under
    `distance_function`, embeds streamlines (dissimilarity representation 'DR',
    or flattened coordinates 'FLAT'/'FLATFLIP'), then plots and saves the
    global and per-interval correlations between the two distances.

    Args:
        filename_idx: index into the hard-coded tractogram file list.
        embedding: one of 'DR', 'FLAT', 'FLATFLIP'.
        k: number of DR prototypes (ignored for FLAT embeddings).
        distance_function: bundles_distances_mdf or bundles_distances_mam.
        nb_points: resampling size for MDF / FLAT embeddings.
        distance_threshold: neighborhood radius in the original distance.
    """
    n = 300  # number of streamlines to query for neighbors
    max_neighbors = 200
    max_streamlines = 100000
    savefig = True
    extension_format = '.jpg'
    results_dir = 'results_%d/' % filename_idx
    os.makedirs(results_dir, exist_ok=True)
    filenames = ['sub-100206_var-FNAL_tract.trk', 'sub-500222_var-EPR_tract.tck']
    filename = filenames[filename_idx]
    print("Loading %s" % filename)
    streamlines2 = nib.streamlines.load(filename).streamlines
    print("Subsampling %s at random from the whole tractogram, to reduce computations" % max_streamlines)
    streamlines = streamlines2[np.random.permutation(len(streamlines2))[:max_streamlines]]
    if distance_function == bundles_distances_mdf or embedding == 'FLAT' or embedding == 'FLATFLIP':
        # MDF and the flat embeddings require a fixed number of points per streamline.
        print("Resampling streamlines to %s points because of MDF or FLAT embedding" % nb_points)
        streamlines = np.array(set_number_of_points(streamlines, nb_points=nb_points))
        distance_name = 'MDF%d' % nb_points
    elif distance_function == bundles_distances_mam:
        distance_name = 'MAM'
        # `np.object` was removed in NumPy 1.24; `object` is the equivalent dtype.
        streamlines = np.array(streamlines, dtype=object)
    else:
        raise NotImplementedError

    # landmark_policy = 'random'
    landmark_policy = 'sff'
    if embedding == 'DR':
        print("Computing %s prototypes with %s policy" % (k, landmark_policy))
        prototype_idx = compute_subset(dataset=streamlines,
                                       distance=distance_function,
                                       num_landmarks=k,
                                       landmark_policy=landmark_policy)
        embedding_name = embedding + '%03d' % k
    elif embedding == 'FLAT' or embedding == 'FLATFLIP':
        embedding_name = embedding
        # assert(distance_function == bundles_distances_mdf)
    else:
        raise NotImplementedError

    original_distance = []
    euclidean_distance = []
    streamline1_idx = []
    streamline2_idx = []
    print("Randomly subsampling %s streamlines for nearest-neighbors queries" % n)
    s1_idx = np.random.permutation(len(streamlines))[:n]
    print("Computing %s on streamlines vs Euclidean distance on %s" % (distance_name, embedding_name))
    for i, idx in enumerate(s1_idx):
        print(i)
        s1 = streamlines[idx]
        distances = distance_function([s1], streamlines)[0]
        tmp = np.where(distances < distance_threshold)[0]
        # Remove the query streamline itself from its neighborhood. The
        # previous code compared indices against 0.0, which dropped
        # streamline 0 instead of the query `idx`.
        tmp = tmp[tmp != idx]
        if len(tmp) > max_neighbors:
            print("Trimming %s neighbors to %s" % (len(tmp), max_neighbors))
            tmp = np.random.permutation(tmp)[:max_neighbors]
        streamline1_idx.append([idx] * len(tmp))
        streamline2_idx.append(tmp)
        original_distance.append(distances[tmp])
        if embedding == 'DR':
            v_s1 = distance_function([s1], streamlines[prototype_idx])
            v_neighbors = distance_function(streamlines[tmp],
                                             streamlines[prototype_idx])
        elif embedding == 'FLAT' or embedding == 'FLATFLIP':
            v_s1 = s1.flatten()
            v_neighbors = streamlines[tmp].reshape(tmp.shape[0], -1)
        else:
            raise NotImplementedError
        if embedding == 'FLATFLIP':
            # Use the smaller of the distances to the streamline and to its
            # point-order-reversed copy (streamlines have no orientation).
            direct_distances = np.linalg.norm(v_s1 - v_neighbors, axis=1)
            flipped_distances = np.linalg.norm(v_s1.reshape(-1, 3)[::-1].flatten() - v_neighbors, axis=1)
            euclidean_distance.append(np.minimum(direct_distances, flipped_distances))
        else:
            euclidean_distance.append(np.linalg.norm(v_s1 - v_neighbors, axis=1))

    streamline1_idx = np.concatenate(streamline1_idx)
    streamline2_idx = np.concatenate(streamline2_idx)
    original_distance = np.concatenate(original_distance)
    euclidean_distance = np.concatenate(euclidean_distance)
    global_correlation = np.corrcoef(original_distance, euclidean_distance)[0, 1]
    print("Global correlation: %s" % global_correlation)

    # Scatter plot of original vs Euclidean distances.
    import matplotlib.pyplot as plt
    plt.ion()
    plt.figure()
    plt.plot(original_distance, euclidean_distance, 'o')
    plt.xlabel('$'+distance_name+'$')
    plt.ylabel('Euclidean distance')
    plt.title(r'$%s$ vs Euclid(%s): $\rho$=%f' % (distance_name,
                                                   embedding_name,
                                                   global_correlation))
    filename_fig = results_dir + '%s_vs_%s_%d_%d' % (distance_name,
                                                     embedding_name,
                                                     original_distance.min(),
                                                     distance_threshold)
    if savefig:
        tmp = filename_fig + extension_format
        print('Saving figure to %s' % tmp)
        plt.savefig(tmp)

    # Correlation restricted to successive distance intervals.
    print("Local correlation:")
    n_steps = 10
    distance_threshold_min = np.linspace(0, original_distance.max(), n_steps)
    correlations = np.zeros(n_steps - 1)
    for i, (dtmin, dtmax) in enumerate(zip(distance_threshold_min[:-1], distance_threshold_min[1:])):
        tmp = np.logical_and(original_distance > dtmin, original_distance <= dtmax)
        # tmp = original_distance <= dtmax
        od = original_distance[tmp]
        ed = euclidean_distance[tmp]
        correlations[i] = np.corrcoef(od, ed)[0, 1]
        print("%s) %s - %s : %s, corr=%s" % (i, dtmin, dtmax, tmp.sum(), correlations[i]))

    plt.figure()
    # plt.hist(correlations, bins=distance_threshold_min)
    plt.bar(distance_threshold_min[:-1], correlations, width=np.diff(distance_threshold_min).mean())
    plt.xlabel(distance_name)
    plt.ylabel('correlation')
    plt.title(r'$\rho$($%s$, Euclid(%s)) in different intervals' % (distance_name,
                                                                    embedding_name))
    plt.xlim([distance_threshold_min.min(), distance_threshold_min.max()])
    plt.ylim([min(0, correlations.min()), 1.0])
    plt.plot([distance_threshold_min.min(),
              distance_threshold_min.max()], [global_correlation,
                                              global_correlation], 'r-',
             label=r'global $\rho$')
    plt.plot([distance_threshold_min.min(),
              distance_threshold_min.max()], [correlations.mean(),
                                              correlations.mean()], 'g-',
             label=r'avg $\rho$')
    plt.legend()
    if savefig:
        tmp = filename_fig + '_correlations' + extension_format
        print('Saving figure to %s' % tmp)
        plt.savefig(tmp)
if __name__ == '__main__':
    np.random.seed(42)  # fixed seed so the random streamline subsampling is reproducible
    filename_idx = 0  # which tractogram file to load (index into the filename list)
    embedding = 'DR' # alternatives handled by experiment(): 'FLAT', 'FLATFLIP'
    k = 40  # number of landmarks for the 'DR' embedding; appended to the embedding name
    distance_function = bundles_distances_mdf
    # distance_function = bundles_distances_mam  # alternative streamline distance
    nb_points = 20  # streamlines are resampled to this many points before embedding
    distance_threshold = 200.0  # radius used to collect neighbors of each query streamline
    experiment(filename_idx, embedding, k, distance_function, nb_points, distance_threshold)
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"os.path.exists",
"nibabel.streamlines.load",
"numpy.minimum",
"numpy.corrcoef",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ion",
"dipy.tracking.streamline.set_number_of_points",
"numpy.random.... | [((4344, 4375), 'numpy.concatenate', 'np.concatenate', (['streamline1_idx'], {}), '(streamline1_idx)\n', (4358, 4375), True, 'import numpy as np\n'), ((4398, 4429), 'numpy.concatenate', 'np.concatenate', (['streamline2_idx'], {}), '(streamline2_idx)\n', (4412, 4429), True, 'import numpy as np\n'), ((4454, 4487), 'numpy.concatenate', 'np.concatenate', (['original_distance'], {}), '(original_distance)\n', (4468, 4487), True, 'import numpy as np\n'), ((4513, 4547), 'numpy.concatenate', 'np.concatenate', (['euclidean_distance'], {}), '(euclidean_distance)\n', (4527, 4547), True, 'import numpy as np\n'), ((4728, 4737), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (4735, 4737), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4752, 4754), True, 'import matplotlib.pyplot as plt\n'), ((4759, 4811), 'matplotlib.pyplot.plot', 'plt.plot', (['original_distance', 'euclidean_distance', '"""o"""'], {}), "(original_distance, euclidean_distance, 'o')\n", (4767, 4811), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4853), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('$' + distance_name + '$')"], {}), "('$' + distance_name + '$')\n", (4826, 4853), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4886), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Euclidean distance"""'], {}), "('Euclidean distance')\n", (4864, 4886), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4992), 'matplotlib.pyplot.title', 'plt.title', (["('$%s$ vs Euclid(%s): $\\\\rho$=%f' % (distance_name, embedding_name,\n global_correlation))"], {}), "('$%s$ vs Euclid(%s): $\\\\rho$=%f' % (distance_name, embedding_name,\n global_correlation))\n", (4900, 4992), True, 'import matplotlib.pyplot as plt\n'), ((5658, 5679), 'numpy.zeros', 'np.zeros', (['(n_steps - 1)'], {}), '(n_steps - 1)\n', (5666, 5679), True, 'import numpy as np\n'), ((6130, 6142), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', 
(6140, 6142), True, 'import matplotlib.pyplot as plt\n'), ((6306, 6331), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['distance_name'], {}), '(distance_name)\n', (6316, 6331), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6361), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""correlation"""'], {}), "('correlation')\n", (6346, 6361), True, 'import matplotlib.pyplot as plt\n'), ((6366, 6466), 'matplotlib.pyplot.title', 'plt.title', (["('$\\\\rho$($%s$, Euclid(%s)) in different intervals' % (distance_name,\n embedding_name))"], {}), "('$\\\\rho$($%s$, Euclid(%s)) in different intervals' % (\n distance_name, embedding_name))\n", (6375, 6466), True, 'import matplotlib.pyplot as plt\n'), ((7096, 7108), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7106, 7108), True, 'import matplotlib.pyplot as plt\n'), ((7290, 7308), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7304, 7308), True, 'import numpy as np\n'), ((885, 912), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (899, 912), False, 'import os\n'), ((922, 946), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (933, 946), False, 'import os\n'), ((1123, 1153), 'nibabel.streamlines.load', 'nib.streamlines.load', (['filename'], {}), '(filename)\n', (1143, 1153), True, 'import nibabel as nib\n'), ((2072, 2189), 'euclidean_embeddings.subsampling.compute_subset', 'compute_subset', ([], {'dataset': 'streamlines', 'distance': 'distance_function', 'num_landmarks': 'k', 'landmark_policy': 'landmark_policy'}), '(dataset=streamlines, distance=distance_function,\n num_landmarks=k, landmark_policy=landmark_policy)\n', (2086, 2189), False, 'from euclidean_embeddings.subsampling import compute_subset\n'), ((4573, 4623), 'numpy.corrcoef', 'np.corrcoef', (['original_distance', 'euclidean_distance'], {}), '(original_distance, euclidean_distance)\n', (4584, 4623), True, 'import numpy as np\n'), ((5490, 5506), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['tmp'], {}), '(tmp)\n', (5501, 5506), True, 'import matplotlib.pyplot as plt\n'), ((5796, 5865), 'numpy.logical_and', 'np.logical_and', (['(original_distance > dtmin)', '(original_distance <= dtmax)'], {}), '(original_distance > dtmin, original_distance <= dtmax)\n', (5810, 5865), True, 'import numpy as np\n'), ((7240, 7256), 'matplotlib.pyplot.savefig', 'plt.savefig', (['tmp'], {}), '(tmp)\n', (7251, 7256), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1647), 'dipy.tracking.streamline.set_number_of_points', 'set_number_of_points', (['streamlines'], {'nb_points': 'nb_points'}), '(streamlines, nb_points=nb_points)\n', (1613, 1647), False, 'from dipy.tracking.streamline import set_number_of_points\n'), ((1798, 1836), 'numpy.array', 'np.array', (['streamlines'], {'dtype': 'np.object'}), '(streamlines, dtype=np.object)\n', (1806, 1836), True, 'import numpy as np\n'), ((3055, 3095), 'numpy.where', 'np.where', (['(distances < distance_threshold)'], {}), '(distances < distance_threshold)\n', (3063, 3095), True, 'import numpy as np\n'), ((3989, 4031), 'numpy.linalg.norm', 'np.linalg.norm', (['(v_s1 - v_neighbors)'], {'axis': '(1)'}), '(v_s1 - v_neighbors, axis=1)\n', (4003, 4031), True, 'import numpy as np\n'), ((6008, 6027), 'numpy.corrcoef', 'np.corrcoef', (['od', 'ed'], {}), '(od, ed)\n', (6019, 6027), True, 'import numpy as np\n'), ((3294, 3320), 'numpy.random.permutation', 'np.random.permutation', (['tmp'], {}), '(tmp)\n', (3315, 3320), True, 'import numpy as np\n'), ((4176, 4223), 'numpy.minimum', 'np.minimum', (['direct_distances', 'flipped_distances'], {}), '(direct_distances, flipped_distances)\n', (4186, 4223), True, 'import numpy as np\n'), ((4277, 4319), 'numpy.linalg.norm', 'np.linalg.norm', (['(v_s1 - v_neighbors)'], {'axis': '(1)'}), '(v_s1 - v_neighbors, axis=1)\n', (4291, 4319), True, 'import numpy as np\n'), ((6262, 6293), 'numpy.diff', 'np.diff', (['distance_threshold_min'], {}), '(distance_threshold_min)\n', (6269, 6293), True, 
'import numpy as np\n')] |
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import canyon_tools.readout_tools as rout
#from MITgcmutils import rdmds # cant make it work
#CGrid = '/data/kramosmu/results/TracerExperiments/CNTDIFF/run38/gridGlob.nc' #
#phiHyd = '/data/kramosmu/results/TracerExperiments/CNTDIFF/run38/phiHydGlob.nc'
CGrid = '/data/kramosmu/results/TracerExperiments/3DVISC_REALISTIC/run29/gridGlob.nc' #
phiHyd = '/data/kramosmu/results/TracerExperiments/3DVISC_REALISTIC/run29/phiHydGlob.nc'
pout = Dataset(phiHyd)     # hydrostatic pressure file; opened but never read below
CGridOut = Dataset(CGrid)  # model grid file

# General input (grid dimensions of the MITgcm run)
nx = 360
ny = 360
nz = 90
nt = 19 # t dimension size

rc = CGridOut.variables['RC']   # depth of tracer-cell centers
xc = rout.getField(CGrid, 'XC') # x coords tracer cells
yc = rout.getField(CGrid, 'YC') # y coords tracer cells
drF = CGridOut.variables['drF'] # vertical distance between faces
drC = CGridOut.variables['drC'] # vertical distance between centers

hFacC = rout.getField(CGrid, 'HFacC')
# NOTE(review): MaskC is passed directly as the np.ma mask below; confirm
# rout.getMask returns True/1 where cells should be MASKED (np.ma convention),
# not where they are wet.
MaskC = rout.getMask(CGrid, 'HFacC')
rA = rout.getField(CGrid, 'rA')
bathy = rout.getField(CGrid, 'Depth')

# STATIONS: (y, x) grid indices of the water columns where N and N^2 are evaluated
ys = [#262,220,262,227,100,245,
      275,
      #245,262,220,
      ] # 288, 275 for longer canyons
xs = [#60,60,180,180,180,160,
      200,
      #200,300,300,
      ]
stations = [#'UpSh','UpSl','CH','CM','CO','UpC',
            'UwH'#'DnC','DnSh','DnSl',
            ]

#All experiments in CNT and 3D including no canyon one (run07)
expList = ['/data/kramosmu/results/TracerExperiments/3DVISC_REALISTIC/run29',
           ]
expNames = ['3DVISC_REALISTIC_run29',
            ]

#RhoRef = np.squeeze(rdmds('/data/kramosmu/results/TracerExperiments/CNTDIFF/run38/RhoRef')) # I cannot make this function work
RhoRef = 999.79998779 # reference density; constant throughout the runs

# The following limits/indices are defined but not used below
# (xi and yi are immediately overwritten by the station loop).
nzlim = 30
zfin = 30
xi = 180
yi = 50
xh1=120
xh2=240
yh1=227
yh2=267

# Physical constants for the linear equation of state
g = 9.81 # ms^-2
alpha = 2.0E-4 # 1/degC, thermal expansion coefficient
beta = 7.4E-4  # haline contraction coefficient

times = [0,2,4,6,8,10,12,14,16,18]  # output time indices to process (10 snapshots)

for exp,runs in zip(expList,expNames):
    print(runs)
    CState = Dataset('%s/stateGlob.nc' %exp)
    CPhi = Dataset('%s/phiHydGlob.nc' %exp)

    # Load temperature, salinity and pressure; keep only the first 360 x-points.
    Temp = CState.variables['Temp'][:,:,:,0:360]
    S = CState.variables['S'][:,:,:,0:360]
    P = CPhi.variables['phiHyd'][:,:,:,0:360]  # read but never used below

    # Broadcast the 3D mask over the time dimension so it matches Temp/S shape.
    MaskExpand = np.expand_dims(MaskC[:,:,0:360],0)
    maskExp = MaskExpand + np.zeros((Temp).shape)
    TempMask=np.ma.array(Temp,mask=maskExp)
    SMask=np.ma.array(S,mask=maskExp)

    print(runs,'done reading')
    for yi,xi,sname in zip(ys,xs,stations): # station indices
        # N and N^2 profiles: one row per output time, nz-2 interior levels
        N = np.ma.empty((len(times),nz-2))
        N2 = np.ma.empty((len(times),nz-2))
        ii = 0
        for tt in times:
            #Linear eq. of state: rho = RhoRef*(1 - alpha*T + beta*S)
            rho = RhoRef*(np.ones(np.shape(TempMask[tt,:,yi,xi])) - alpha*(TempMask[tt,:,yi,xi]) + beta*(SMask[tt,:,yi,xi]))
            # N^2 for each station from a centered vertical density difference.
            # NOTE(review): where density is unstably stratified (rho decreasing
            # with depth), N2 < 0 and the **(0.5) below produces NaN in N.
            N[ii,:] = ((-g/RhoRef)*((rho[2:] - rho[:-2])/(-drC[3:]-drC[2:-1])))**(0.5)
            N2[ii,:] = ((-g/RhoRef)*((rho[2:] - rho[:-2])/(-drC[3:]-drC[2:-1])))
            ii = ii+1
        # Assemble one column per snapshot and write N and N^2 profiles to csv.
        raw_data = {'drC' : drC[2:-1],'N_tt00': N[0,:],'N_tt02': N[1,:],'N_tt04': N[2,:],'N_tt06': N[3,:],
                    'N_tt08': N[4,:],'N_tt10': N[5,:],'N_tt12': N[6,:],'N_tt14': N[7,:],'N_tt16': N[8,:],
                    'N_tt18': N[9,:]}
        raw_data2 = {'drC' : drC[2:-1],'N2_tt00': N2[0,:],'N2_tt02': N2[1,:],'N2_tt04': N2[2,:],'N2_tt06': N2[3,:],
                     'N2_tt08': N2[4,:],'N2_tt10': N2[5,:],'N2_tt12': N2[6,:],'N2_tt14': N2[7,:],'N2_tt16': N2[8,:],
                     'N2_tt18': N2[9,:]}
        df = pd.DataFrame(raw_data, columns = ['drC', 'N_tt00', 'N_tt02', 'N_tt04', 'N_tt06', 'N_tt08','N_tt10',
                                          'N_tt12','N_tt14', 'N_tt16','N_tt18' ])
        df2 = pd.DataFrame(raw_data2, columns = ['drC', 'N2_tt00', 'N2_tt02', 'N2_tt04', 'N2_tt06', 'N2_tt08','N2_tt10',
                                           'N2_tt12','N2_tt14', 'N2_tt16','N2_tt18' ])
        filename1 = ('../results/metricsDataFrames/N_%s_%s.csv' % (runs,sname))
        filename2 = ('../results/metricsDataFrames/N2_%s_%s.csv' % (runs,sname))
        df.to_csv(filename1)
        df2.to_csv(filename2)
| [
"netCDF4.Dataset",
"pandas.DataFrame",
"numpy.zeros",
"numpy.expand_dims",
"numpy.shape",
"numpy.ma.array",
"canyon_tools.readout_tools.getMask",
"canyon_tools.readout_tools.getField"
] | [((509, 524), 'netCDF4.Dataset', 'Dataset', (['phiHyd'], {}), '(phiHyd)\n', (516, 524), False, 'from netCDF4 import Dataset\n'), ((536, 550), 'netCDF4.Dataset', 'Dataset', (['CGrid'], {}), '(CGrid)\n', (543, 550), False, 'from netCDF4 import Dataset\n'), ((660, 686), 'canyon_tools.readout_tools.getField', 'rout.getField', (['CGrid', '"""XC"""'], {}), "(CGrid, 'XC')\n", (673, 686), True, 'import canyon_tools.readout_tools as rout\n'), ((716, 742), 'canyon_tools.readout_tools.getField', 'rout.getField', (['CGrid', '"""YC"""'], {}), "(CGrid, 'YC')\n", (729, 742), True, 'import canyon_tools.readout_tools as rout\n'), ((911, 940), 'canyon_tools.readout_tools.getField', 'rout.getField', (['CGrid', '"""HFacC"""'], {}), "(CGrid, 'HFacC')\n", (924, 940), True, 'import canyon_tools.readout_tools as rout\n'), ((949, 977), 'canyon_tools.readout_tools.getMask', 'rout.getMask', (['CGrid', '"""HFacC"""'], {}), "(CGrid, 'HFacC')\n", (961, 977), True, 'import canyon_tools.readout_tools as rout\n'), ((983, 1009), 'canyon_tools.readout_tools.getField', 'rout.getField', (['CGrid', '"""rA"""'], {}), "(CGrid, 'rA')\n", (996, 1009), True, 'import canyon_tools.readout_tools as rout\n'), ((1020, 1049), 'canyon_tools.readout_tools.getField', 'rout.getField', (['CGrid', '"""Depth"""'], {}), "(CGrid, 'Depth')\n", (1033, 1049), True, 'import canyon_tools.readout_tools as rout\n'), ((1993, 2025), 'netCDF4.Dataset', 'Dataset', (["('%s/stateGlob.nc' % exp)"], {}), "('%s/stateGlob.nc' % exp)\n", (2000, 2025), False, 'from netCDF4 import Dataset\n'), ((2037, 2070), 'netCDF4.Dataset', 'Dataset', (["('%s/phiHydGlob.nc' % exp)"], {}), "('%s/phiHydGlob.nc' % exp)\n", (2044, 2070), False, 'from netCDF4 import Dataset\n'), ((2234, 2271), 'numpy.expand_dims', 'np.expand_dims', (['MaskC[:, :, 0:360]', '(0)'], {}), '(MaskC[:, :, 0:360], 0)\n', (2248, 2271), True, 'import numpy as np\n'), ((2342, 2373), 'numpy.ma.array', 'np.ma.array', (['Temp'], {'mask': 'maskExp'}), '(Temp, mask=maskExp)\n', (2353, 
2373), True, 'import numpy as np\n'), ((2386, 2414), 'numpy.ma.array', 'np.ma.array', (['S'], {'mask': 'maskExp'}), '(S, mask=maskExp)\n', (2397, 2414), True, 'import numpy as np\n'), ((2297, 2317), 'numpy.zeros', 'np.zeros', (['Temp.shape'], {}), '(Temp.shape)\n', (2305, 2317), True, 'import numpy as np\n'), ((3622, 3765), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data'], {'columns': "['drC', 'N_tt00', 'N_tt02', 'N_tt04', 'N_tt06', 'N_tt08', 'N_tt10',\n 'N_tt12', 'N_tt14', 'N_tt16', 'N_tt18']"}), "(raw_data, columns=['drC', 'N_tt00', 'N_tt02', 'N_tt04',\n 'N_tt06', 'N_tt08', 'N_tt10', 'N_tt12', 'N_tt14', 'N_tt16', 'N_tt18'])\n", (3634, 3765), True, 'import pandas as pd\n'), ((3827, 3985), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data2'], {'columns': "['drC', 'N2_tt00', 'N2_tt02', 'N2_tt04', 'N2_tt06', 'N2_tt08', 'N2_tt10',\n 'N2_tt12', 'N2_tt14', 'N2_tt16', 'N2_tt18']"}), "(raw_data2, columns=['drC', 'N2_tt00', 'N2_tt02', 'N2_tt04',\n 'N2_tt06', 'N2_tt08', 'N2_tt10', 'N2_tt12', 'N2_tt14', 'N2_tt16',\n 'N2_tt18'])\n", (3839, 3985), True, 'import pandas as pd\n'), ((2734, 2767), 'numpy.shape', 'np.shape', (['TempMask[tt, :, yi, xi]'], {}), '(TempMask[tt, :, yi, xi])\n', (2742, 2767), True, 'import numpy as np\n')] |
from .CarDEC_optimization import grad_reconstruction as grad, MSEloss
from .CarDEC_dataloaders import simpleloader, aeloader
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, concatenate
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.backend import set_floatx
from time import time
import random
import numpy as np
from scipy.stats import zscore
import os
# Force Keras to use single-precision floats for all weights and computations.
set_floatx('float32')
class SAE(Model):
def __init__(self, dims, act = 'relu', actincenter = "tanh",
random_seed = 201809, splitseed = 215, init = "glorot_uniform", optimizer = Adam(),
weights_dir = 'CarDEC Weights'):
""" This class method initializes the SAE model.
Arguments:
------------------------------------------------------------------
- dims: `list`, the number of output features for each layer of the HVG encoder. The length of the list determines the number of layers.
- act: `str`, The activation function used for the intermediate layers of CarDEC, other than the bottleneck layer.
- actincenter: `str`, The activation function used for the bottleneck layer of CarDEC.
- random_seed: `int`, The seed used for random weight intialization.
- splitseed: `int`, The seed used to split cells between training and validation. Should be consistent between iterations to ensure the same cells are always used for validation.
- init: `str`, The weight initialization strategy for the autoencoder.
- optimizer: `tensorflow.python.keras.optimizer_v2`, An instance of a TensorFlow optimizer.
- weights_dir: `str`, the path in which to save the weights of the CarDEC model.
"""
super(SAE, self).__init__()
tf.keras.backend.clear_session()
self.weights_dir = weights_dir
self.dims = dims
self.n_stacks = len(dims) - 1
self.init = init
self.optimizer = optimizer
self.random_seed = random_seed
self.splitseed = splitseed
self.activation = act
self.actincenter = actincenter #hidden layer activation function
#set random seed
random.seed(random_seed)
np.random.seed(random_seed)
tf.random.set_seed(random_seed)
encoder_layers = []
for i in range(self.n_stacks-1):
encoder_layers.append(Dense(self.dims[i + 1], kernel_initializer = self.init, activation = self.activation, name='encoder_%d' % i))
encoder_layers.append(Dense(self.dims[-1], kernel_initializer=self.init, activation=self.actincenter, name='embedding'))
self.encoder = Sequential(encoder_layers, name = 'encoder')
decoder_layers = []
for i in range(self.n_stacks - 1, 0, -1):
decoder_layers.append(Dense(self.dims[i], kernel_initializer = self.init, activation = self.activation
, name = 'decoder%d' % (i-1)))
decoder_layers.append(Dense(self.dims[0], activation = 'linear', name='output'))
self.decoder = Sequential(decoder_layers, name = 'decoder')
self.construct()
def call(self, x):
""" This is the forward pass of the model.
***Inputs***
- x: `tf.Tensor`, an input tensor of shape (n_obs, p_HVG).
***Outputs***
- output: `tf.Tensor`, A (n_obs, p_HVG) tensor of denoised HVG expression.
"""
c = self.encoder(x)
output = self.decoder(c)
return output
    def load_encoder(self, random_seed = 2312):
        """ This class method loads pretrained encoder weights from disk while
        randomly reinitializing a fresh decoder.

        Arguments:
        ------------------------------------------------------------------
        - random_seed: `int`, Seed for reinitializing the decoder.

        NOTE(review): this builds `self.decoder_base` and `self.output_layer`,
        but `call` still routes through `self.decoder` (whose weights are left
        untouched). Presumably a subclass or later code wires decoder_base and
        output_layer into the forward pass — confirm against callers.
        """
        tf.keras.backend.clear_session()

        # Seed Python, NumPy and TensorFlow RNGs so the fresh decoder
        # initialization is reproducible.
        random.seed(random_seed)
        np.random.seed(random_seed)
        tf.random.set_seed(random_seed)

        # Restore only the encoder weights; expect_partial() silences warnings
        # about checkpoint values that are not consumed.
        self.encoder.load_weights("./" + self.weights_dir + "/pretrained_encoder_weights").expect_partial()

        # Freshly initialized decoder stack (without the final output layer).
        decoder_layers = []
        for i in range(self.n_stacks - 1, 0, -1):
            decoder_layers.append(Dense(self.dims[i], kernel_initializer = self.init, activation = self.activation
                                        , name='decoder%d' % (i-1)))

        self.decoder_base = Sequential(decoder_layers, name = 'decoderbase')
        # Final linear layer mapping back to the input feature space.
        self.output_layer = Dense(self.dims[0], activation = 'linear', name='output')

        self.construct(summarize = False)
def load_autoencoder(self, ):
""" This class method can be used to load the full model's weights."""
tf.keras.backend.clear_session()
self.load_weights("./" + self.weights_dir + "/pretrained_autoencoder_weights").expect_partial()
def construct(self, summarize = False):
""" This class method fully initalizes the TensorFlow model.
Arguments:
------------------------------------------------------------------
- summarize: `bool`, If True, then print a summary of the model architecture.
"""
x = tf.zeros(shape = (1, self.dims[0]), dtype=float)
out = self(x)
if summarize:
print("----------Autoencoder Architecture----------")
self.summary()
print("\n----------Encoder Sub-Architecture----------")
self.encoder.summary()
print("\n----------Base Decoder Sub-Architecture----------")
self.decoder.summary()
def denoise(self, adata, batch_size = 64):
""" This class method can be used to denoise gene expression for each cell.
Arguments:
------------------------------------------------------------------
- adata: `anndata.AnnData`, The annotated data matrix of shape (n_obs, n_vars).
- batch_size: `int`, The batch size used for computing denoised expression.
Returns:
------------------------------------------------------------------
- output: `np.ndarray`, Numpy array of denoised expression of shape (n_obs, n_vars)
"""
input_ds = simpleloader(adata.layers["normalized input"][:, adata.var['Variance Type'] == 'HVG'], batch_size)
output = np.zeros((adata.shape[0], self.dims[0]), dtype = 'float32')
start = 0
for x in input_ds:
end = start + x.shape[0]
output[start:end] = self(x).numpy()
start = end
return output
def embed(self, adata, batch_size = 64):
""" This class method can be used to compute the low-dimension embedding for HVG features.
Arguments:
------------------------------------------------------------------
- adata: `anndata.AnnData`, The annotated data matrix of shape (n_obs, n_vars).
- batch_size: `int`, The batch size for filling the array of low dimension embeddings.
Returns:
------------------------------------------------------------------
- embedding: `np.ndarray`, Array of shape (n_obs, n_vars) containing the cell HVG embeddings.
"""
input_ds = simpleloader(adata.layers["normalized input"][:, adata.var['Variance Type'] == 'HVG'], batch_size)
embedding = np.zeros((adata.shape[0], self.dims[-1]), dtype = 'float32')
start = 0
for x in input_ds:
end = start + x.shape[0]
embedding[start:end] = self.encoder(x).numpy()
start = end
return embedding
def makegenerators(self, adata, val_split, batch_size, splitseed):
""" This class method creates training and validation data generators for the current input data.
Arguments:
------------------------------------------------------------------
- adata: `anndata.AnnData`, the annotated data matrix of shape (n_obs, n_vars).
- val_split: `float`, The fraction of cells to be reserved for validation during this step.
- batch_size: `int`, The batch size used for training the model.
- splitseed: `int`, The seed used to split cells between training and validation.
Returns:
------------------------------------------------------------------
- train_dataset: `tf.data.Dataset`, Dataset that returns training examples.
- val_dataset: `tf.data.Dataset`, Dataset that returns validation examples.
"""
return aeloader(adata.layers["normalized input"][:, adata.var['Variance Type'] == 'HVG'], adata.layers["normalized input"][:, adata.var['Variance Type'] == 'HVG'], val_frac = val_split, batch_size = batch_size, splitseed = splitseed)
def train(self, adata, num_epochs = 2000, batch_size = 64, val_split = 0.1, lr = 1e-03, decay_factor = 1/3,
          patience_LR = 3, patience_ES = 9, save_fullmodel = True):
    """ Train the SAE with early stopping and learning-rate decay on plateau.

    Arguments:
    ------------------------------------------------------------------
    - adata: `anndata.AnnData`, The annotated data matrix of shape (n_obs, n_vars).
    - num_epochs: `int`, The maximum number of epochs allowed to train the full model. In practice, the model will halt training long before hitting this limit.
    - batch_size: `int`, The batch size used for training the full model.
    - val_split: `float`, The fraction of cells to be reserved for validation during this step.
    - lr: `float`, The learning rate for training the full model.
    - decay_factor: `float`, The multiplicative factor by which to decay the learning rate when validation loss is not decreasing.
    - patience_LR: `int`, The number of epochs tolerated before decaying the learning rate during which the validation loss fails to decrease.
    - patience_ES: `int`, The number of epochs tolerated before stopping training during which the validation loss fails to decrease.
    - save_fullmodel: `bool`, If True, save the full model's weights, not just the encoder.
    """
    tf.keras.backend.clear_session()
    # BUG FIX: val_split was previously hard-coded to 0.1 here, silently
    # ignoring the caller-supplied value. Pass the argument through.
    dataset = self.makegenerators(adata, val_split = val_split, batch_size = batch_size, splitseed = self.splitseed)
    counter_LR = 0
    counter_ES = 0
    best_loss = np.inf
    self.optimizer.lr = lr
    total_start = time()
    for epoch in range(num_epochs):
        epoch_start = time()
        epoch_loss_avg = tf.keras.metrics.Mean()
        epoch_loss_avg_val = tf.keras.metrics.Mean()
        # Training loop - using batches of batch_size
        for x, target in dataset(val = False):
            loss_value, grads = grad(self, x, target, MSEloss)
            self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
            epoch_loss_avg(loss_value)  # Add current batch loss
        # Validation Loop
        for x, target in dataset(val = True):
            output = self(x)
            loss_value = MSEloss(target, output)
            epoch_loss_avg_val(loss_value)
        current_loss_val = epoch_loss_avg_val.result()
        epoch_time = round(time() - epoch_start, 1)
        print("Epoch {:03d}: Training Loss: {:.3f}, Validation Loss: {:.3f}, Time: {:.1f} s".format(epoch, epoch_loss_avg.result().numpy(), epoch_loss_avg_val.result().numpy(), epoch_time))
        # An epoch only counts as an improvement if it beats the best
        # validation loss by more than 1e-3.
        if(current_loss_val + 10**(-3) < best_loss):
            counter_LR = 0
            counter_ES = 0
            best_loss = current_loss_val
        else:
            counter_LR = counter_LR + 1
            counter_ES = counter_ES + 1
            if patience_ES <= counter_ES:
                break
            if patience_LR <= counter_LR:
                self.optimizer.lr = self.optimizer.lr * decay_factor
                counter_LR = 0
                print("\nDecaying Learning Rate to: " + str(self.optimizer.lr.numpy()))
        # End epoch
    total_time = round(time() - total_start, 2)
    if not os.path.isdir("./" + self.weights_dir):
        os.mkdir("./" + self.weights_dir)
    # BUG FIX: save_fullmodel was accepted and documented but never used;
    # honor it here. The default (True) preserves the previous behavior of
    # always saving the full autoencoder alongside the encoder.
    if save_fullmodel:
        self.save_weights("./" + self.weights_dir + "/pretrained_autoencoder_weights", save_format='tf')
    self.encoder.save_weights("./" + self.weights_dir + "/pretrained_encoder_weights", save_format='tf')
    print('\nTraining Completed')
    print("Total training time: " + str(total_time) + " seconds")
| [
"tensorflow.random.set_seed",
"os.mkdir",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.Mean",
"os.path.isdir",
"tensorflow.keras.backend.clear_session",
"numpy.zeros",
"time.time",
"tensorflow.zeros",
"tensorflow.keras.optimizers.Adam",
"random.seed",
"tens... | [((444, 465), 'tensorflow.keras.backend.set_floatx', 'set_floatx', (['"""float32"""'], {}), "('float32')\n", (454, 465), False, 'from tensorflow.keras.backend import set_floatx\n'), ((645, 651), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (649, 651), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1825, 1857), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1855, 1857), True, 'import tensorflow as tf\n'), ((2266, 2290), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (2277, 2290), False, 'import random\n'), ((2299, 2326), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2313, 2326), True, 'import numpy as np\n'), ((2335, 2366), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['random_seed'], {}), '(random_seed)\n', (2353, 2366), True, 'import tensorflow as tf\n'), ((2762, 2804), 'tensorflow.keras.Sequential', 'Sequential', (['encoder_layers'], {'name': '"""encoder"""'}), "(encoder_layers, name='encoder')\n", (2772, 2804), False, 'from tensorflow.keras import Model, Sequential\n'), ((3206, 3248), 'tensorflow.keras.Sequential', 'Sequential', (['decoder_layers'], {'name': '"""decoder"""'}), "(decoder_layers, name='decoder')\n", (3216, 3248), False, 'from tensorflow.keras import Model, Sequential\n'), ((4085, 4117), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (4115, 4117), True, 'import tensorflow as tf\n'), ((4160, 4184), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (4171, 4184), False, 'import random\n'), ((4193, 4220), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (4207, 4220), True, 'import numpy as np\n'), ((4229, 4260), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['random_seed'], {}), '(random_seed)\n', (4247, 4260), True, 'import tensorflow as tf\n'), ((4674, 4720), 'tensorflow.keras.Sequential', 
'Sequential', (['decoder_layers'], {'name': '"""decoderbase"""'}), "(decoder_layers, name='decoderbase')\n", (4684, 4720), False, 'from tensorflow.keras import Model, Sequential\n'), ((4760, 4815), 'tensorflow.keras.layers.Dense', 'Dense', (['self.dims[0]'], {'activation': '"""linear"""', 'name': '"""output"""'}), "(self.dims[0], activation='linear', name='output')\n", (4765, 4815), False, 'from tensorflow.keras.layers import Dense, concatenate\n'), ((5012, 5044), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (5042, 5044), True, 'import tensorflow as tf\n'), ((5495, 5541), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, self.dims[0])', 'dtype': 'float'}), '(shape=(1, self.dims[0]), dtype=float)\n', (5503, 5541), True, 'import tensorflow as tf\n'), ((6669, 6726), 'numpy.zeros', 'np.zeros', (['(adata.shape[0], self.dims[0])'], {'dtype': '"""float32"""'}), "((adata.shape[0], self.dims[0]), dtype='float32')\n", (6677, 6726), True, 'import numpy as np\n'), ((7743, 7801), 'numpy.zeros', 'np.zeros', (['(adata.shape[0], self.dims[-1])'], {'dtype': '"""float32"""'}), "((adata.shape[0], self.dims[-1]), dtype='float32')\n", (7751, 7801), True, 'import numpy as np\n'), ((10600, 10632), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (10630, 10632), True, 'import tensorflow as tf\n'), ((10910, 10916), 'time.time', 'time', ([], {}), '()\n', (10914, 10916), False, 'from time import time\n'), ((2640, 2742), 'tensorflow.keras.layers.Dense', 'Dense', (['self.dims[-1]'], {'kernel_initializer': 'self.init', 'activation': 'self.actincenter', 'name': '"""embedding"""'}), "(self.dims[-1], kernel_initializer=self.init, activation=self.\n actincenter, name='embedding')\n", (2645, 2742), False, 'from tensorflow.keras.layers import Dense, concatenate\n'), ((3115, 3170), 'tensorflow.keras.layers.Dense', 'Dense', (['self.dims[0]'], {'activation': '"""linear"""', 'name': '"""output"""'}), 
"(self.dims[0], activation='linear', name='output')\n", (3120, 3170), False, 'from tensorflow.keras.layers import Dense, concatenate\n'), ((10983, 10989), 'time.time', 'time', ([], {}), '()\n', (10987, 10989), False, 'from time import time\n'), ((11032, 11055), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (11053, 11055), True, 'import tensorflow as tf\n'), ((11089, 11112), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (11110, 11112), True, 'import tensorflow as tf\n'), ((12723, 12761), 'os.path.isdir', 'os.path.isdir', (["('./' + self.weights_dir)"], {}), "('./' + self.weights_dir)\n", (12736, 12761), False, 'import os\n'), ((12775, 12808), 'os.mkdir', 'os.mkdir', (["('./' + self.weights_dir)"], {}), "('./' + self.weights_dir)\n", (12783, 12808), False, 'import os\n'), ((2483, 2592), 'tensorflow.keras.layers.Dense', 'Dense', (['self.dims[i + 1]'], {'kernel_initializer': 'self.init', 'activation': 'self.activation', 'name': "('encoder_%d' % i)"}), "(self.dims[i + 1], kernel_initializer=self.init, activation=self.\n activation, name='encoder_%d' % i)\n", (2488, 2592), False, 'from tensorflow.keras.layers import Dense, concatenate\n'), ((2920, 3030), 'tensorflow.keras.layers.Dense', 'Dense', (['self.dims[i]'], {'kernel_initializer': 'self.init', 'activation': 'self.activation', 'name': "('decoder%d' % (i - 1))"}), "(self.dims[i], kernel_initializer=self.init, activation=self.\n activation, name='decoder%d' % (i - 1))\n", (2925, 3030), False, 'from tensorflow.keras.layers import Dense, concatenate\n'), ((4496, 4606), 'tensorflow.keras.layers.Dense', 'Dense', (['self.dims[i]'], {'kernel_initializer': 'self.init', 'activation': 'self.activation', 'name': "('decoder%d' % (i - 1))"}), "(self.dims[i], kernel_initializer=self.init, activation=self.\n activation, name='decoder%d' % (i - 1))\n", (4501, 4606), False, 'from tensorflow.keras.layers import Dense, concatenate\n'), ((12674, 12680), 'time.time', 'time', 
([], {}), '()\n', (12678, 12680), False, 'from time import time\n'), ((11786, 11792), 'time.time', 'time', ([], {}), '()\n', (11790, 11792), False, 'from time import time\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.