| body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
|---|---|---|---|---|---|---|---|
| string, 26–98.2k chars | int64 | string, 1–16.8k chars | string, 5–230 chars | string, 1–96 chars | string, 7–89 chars | 1 class: `python` | string, 20–98.2k chars |
@property
def eigvals(self):
'Return the eigenvalues of the specified tensor product observable.\n\n This method uses pre-stored eigenvalues for standard observables where\n possible.\n\n Returns:\n array[float]: array containing the eigenvalues of the tensor product\n observable\n '
if (self._eigvals_cache is not None):
return self._eigvals_cache
standard_observables = {'PauliX', 'PauliY', 'PauliZ', 'Hadamard'}
self._eigvals_cache = pauli_eigs(len(self.wires))
obs_sorted = sorted(self.obs, key=(lambda x: [str(l) for l in x.wires.labels]))
if (set(self.name) - standard_observables):
self._eigvals_cache = np.array([1])
for (k, g) in itertools.groupby(obs_sorted, (lambda x: (x.name in standard_observables))):
if k:
self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))
else:
for ns_ob in g:
self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)
return self._eigvals_cache
| -838,111,445,676,742,700
|
Return the eigenvalues of the specified tensor product observable.
This method uses pre-stored eigenvalues for standard observables where
possible.
Returns:
array[float]: array containing the eigenvalues of the tensor product
observable
|
pennylane/operation.py
|
eigvals
|
DanielPolatajko/pennylane
|
python
|
@property
def eigvals(self):
'Return the eigenvalues of the specified tensor product observable.\n\n This method uses pre-stored eigenvalues for standard observables where\n possible.\n\n Returns:\n array[float]: array containing the eigenvalues of the tensor product\n observable\n '
if (self._eigvals_cache is not None):
return self._eigvals_cache
standard_observables = {'PauliX', 'PauliY', 'PauliZ', 'Hadamard'}
self._eigvals_cache = pauli_eigs(len(self.wires))
obs_sorted = sorted(self.obs, key=(lambda x: [str(l) for l in x.wires.labels]))
if (set(self.name) - standard_observables):
self._eigvals_cache = np.array([1])
for (k, g) in itertools.groupby(obs_sorted, (lambda x: (x.name in standard_observables))):
if k:
self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))
else:
for ns_ob in g:
self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)
return self._eigvals_cache
|
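A minimal usage sketch of the cached spectrum computation above (assuming an older PennyLane release where `Tensor.eigvals` is a property, as in this snapshot of `pennylane/operation.py`):

```python
import numpy as np
import pennylane as qml

# Tensor product of two standard observables: the spectra combine via np.kron.
O = qml.PauliZ(0) @ qml.PauliX(1)
expected = np.kron([1, -1], [1, -1])  # [ 1, -1, -1,  1]
print(O.eigvals)                      # computed once, then served from the cache
assert np.allclose(O.eigvals, expected)
```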
def diagonalizing_gates(self):
'Return the gate set that diagonalizes a circuit according to the\n specified tensor observable.\n\n This method uses pre-stored eigenvalues for standard observables where\n possible and stores the corresponding eigenvectors from the eigendecomposition.\n\n Returns:\n list: list containing the gates diagonalizing the tensor observable\n '
diag_gates = []
for o in self.obs:
diag_gates.extend(o.diagonalizing_gates())
return diag_gates
| 6,156,173,566,321,034,000
|
Return the gate set that diagonalizes a circuit according to the
specified tensor observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
Returns:
list: list containing the gates diagonalizing the tensor observable
|
pennylane/operation.py
|
diagonalizing_gates
|
DanielPolatajko/pennylane
|
python
|
def diagonalizing_gates(self):
'Return the gate set that diagonalizes a circuit according to the\n specified tensor observable.\n\n This method uses pre-stored eigenvalues for standard observables where\n possible and stores the corresponding eigenvectors from the eigendecomposition.\n\n Returns:\n list: list containing the gates diagonalizing the tensor observable\n '
diag_gates = []
for o in self.obs:
diag_gates.extend(o.diagonalizing_gates())
return diag_gates
|
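A small illustration of how the per-factor diagonalizing gates are concatenated (same assumed PennyLane API as above):

```python
import pennylane as qml

O = qml.PauliX(0) @ qml.PauliZ(1)
# PauliX is diagonalized by a Hadamard; PauliZ is already diagonal and adds nothing.
print(O.diagonalizing_gates())  # e.g. [Hadamard(wires=[0])]
```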
@property
def matrix(self):
'Matrix representation of the tensor operator\n in the computational basis.\n\n **Example:**\n\n Note that the returned matrix *only includes explicitly\n declared observables* making up the tensor product;\n that is, it only returns the matrix for the specified\n subsystem it is defined for.\n\n >>> O = qml.PauliZ(0) @ qml.PauliZ(2)\n >>> O.matrix\n array([[ 1, 0, 0, 0],\n [ 0, -1, 0, 0],\n [ 0, 0, -1, 0],\n [ 0, 0, 0, 1]])\n\n To get the full :math:`2^3\\times 2^3` Hermitian matrix\n acting on the 3-qubit system, the identity on wire 1\n must be explicitly included:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n >>> O.matrix\n array([[ 1., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., -1., 0., -0., 0., -0., 0., -0.],\n [ 0., 0., 1., 0., 0., 0., 0., 0.],\n [ 0., -0., 0., -1., 0., -0., 0., -0.],\n [ 0., 0., 0., 0., -1., -0., -0., -0.],\n [ 0., -0., 0., -0., -0., 1., -0., 0.],\n [ 0., 0., 0., 0., -0., -0., -1., -0.],\n [ 0., -0., 0., -0., -0., 0., -0., 1.]])\n\n Returns:\n array: matrix representation\n '
U_list = []
for (_, g) in itertools.groupby(self.obs, (lambda x: x.wires.labels)):
mats = [i.matrix for i in g]
if (len(mats) > 1):
mats = [multi_dot(mats)]
U_list.append(mats[0])
return functools.reduce(np.kron, U_list)
| -51,734,776,827,415,040
|
Matrix representation of the tensor operator
in the computational basis.
**Example:**
Note that the returned matrix *only includes explicitly
declared observables* making up the tensor product;
that is, it only returns the matrix for the specified
subsystem it is defined for.
>>> O = qml.PauliZ(0) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, 1]])
To get the full :math:`2^3\times 2^3` Hermitian matrix
acting on the 3-qubit system, the identity on wire 1
must be explicitly included:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1., 0., 0., 0., 0., 0., 0., 0.],
[ 0., -1., 0., -0., 0., -0., 0., -0.],
[ 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., -0., 0., -1., 0., -0., 0., -0.],
[ 0., 0., 0., 0., -1., -0., -0., -0.],
[ 0., -0., 0., -0., -0., 1., -0., 0.],
[ 0., 0., 0., 0., -0., -0., -1., -0.],
[ 0., -0., 0., -0., -0., 0., -0., 1.]])
Returns:
array: matrix representation
|
pennylane/operation.py
|
matrix
|
DanielPolatajko/pennylane
|
python
|
@property
def matrix(self):
'Matrix representation of the tensor operator\n in the computational basis.\n\n **Example:**\n\n Note that the returned matrix *only includes explicitly\n declared observables* making up the tensor product;\n that is, it only returns the matrix for the specified\n subsystem it is defined for.\n\n >>> O = qml.PauliZ(0) @ qml.PauliZ(2)\n >>> O.matrix\n array([[ 1, 0, 0, 0],\n [ 0, -1, 0, 0],\n [ 0, 0, -1, 0],\n [ 0, 0, 0, 1]])\n\n To get the full :math:`2^3\\times 2^3` Hermitian matrix\n acting on the 3-qubit system, the identity on wire 1\n must be explicitly included:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n >>> O.matrix\n array([[ 1., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., -1., 0., -0., 0., -0., 0., -0.],\n [ 0., 0., 1., 0., 0., 0., 0., 0.],\n [ 0., -0., 0., -1., 0., -0., 0., -0.],\n [ 0., 0., 0., 0., -1., -0., -0., -0.],\n [ 0., -0., 0., -0., -0., 1., -0., 0.],\n [ 0., 0., 0., 0., -0., -0., -1., -0.],\n [ 0., -0., 0., -0., -0., 0., -0., 1.]])\n\n Returns:\n array: matrix representation\n '
U_list = []
for (_, g) in itertools.groupby(self.obs, (lambda x: x.wires.labels)):
mats = [i.matrix for i in g]
if (len(mats) > 1):
mats = [multi_dot(mats)]
U_list.append(mats[0])
return functools.reduce(np.kron, U_list)
|
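The Kronecker-product construction can be checked directly against plain numpy (sketch):

```python
import numpy as np
import pennylane as qml

Z = np.diag([1, -1])
O = qml.PauliZ(0) @ qml.PauliZ(2)
# Only the explicitly declared factors enter the product: a 4x4 matrix here,
# not the full 8x8 operator on wires 0..2.
assert np.allclose(O.matrix, np.kron(Z, Z))
```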
def prune(self):
"Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from\n the observables building up the :class:`~.Tensor`.\n\n The ``return_type`` attribute is preserved while pruning.\n\n If the tensor product only contains one observable, then this observable instance is\n returned.\n\n Note that, as a result, this method can return observables that are not a :class:`~.Tensor`\n instance.\n\n **Example:**\n\n Pruning that returns a :class:`~.Tensor`:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n >>> O.prune()\n <pennylane.operation.Tensor at 0x7fc1642d1590\n >>> [(o.name, o.wires) for o in O.prune().obs]\n [('PauliZ', [0]), ('PauliZ', [2])]\n\n Pruning that returns a single observable:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1)\n >>> O_pruned = O.prune()\n >>> (O_pruned.name, O_pruned.wires)\n ('PauliZ', [0])\n\n Returns:\n ~.Observable: the pruned tensor product of observables\n "
if (len(self.non_identity_obs) == 0):
obs = qml.Identity(self.wires[0])
elif (len(self.non_identity_obs) == 1):
obs = self.non_identity_obs[0]
else:
obs = Tensor(*self.non_identity_obs)
obs.return_type = self.return_type
return obs
| -3,808,663,140,798,350,300
|
Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from
the observables building up the :class:`~.Tensor`.
The ``return_type`` attribute is preserved while pruning.
If the tensor product only contains one observable, then this observable instance is
returned.
Note that, as a result, this method can return observables that are not a :class:`~.Tensor`
instance.
**Example:**
Pruning that returns a :class:`~.Tensor`:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.prune()
<pennylane.operation.Tensor at 0x7fc1642d1590>
>>> [(o.name, o.wires) for o in O.prune().obs]
[('PauliZ', [0]), ('PauliZ', [2])]
Pruning that returns a single observable:
>>> O = qml.PauliZ(0) @ qml.Identity(1)
>>> O_pruned = O.prune()
>>> (O_pruned.name, O_pruned.wires)
('PauliZ', [0])
Returns:
~.Observable: the pruned tensor product of observables
|
pennylane/operation.py
|
prune
|
DanielPolatajko/pennylane
|
python
|
def prune(self):
"Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from\n the observables building up the :class:`~.Tensor`.\n\n The ``return_type`` attribute is preserved while pruning.\n\n If the tensor product only contains one observable, then this observable instance is\n returned.\n\n Note that, as a result, this method can return observables that are not a :class:`~.Tensor`\n instance.\n\n **Example:**\n\n Pruning that returns a :class:`~.Tensor`:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n >>> O.prune()\n <pennylane.operation.Tensor at 0x7fc1642d1590\n >>> [(o.name, o.wires) for o in O.prune().obs]\n [('PauliZ', [0]), ('PauliZ', [2])]\n\n Pruning that returns a single observable:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1)\n >>> O_pruned = O.prune()\n >>> (O_pruned.name, O_pruned.wires)\n ('PauliZ', [0])\n\n Returns:\n ~.Observable: the pruned tensor product of observables\n "
if (len(self.non_identity_obs) == 0):
obs = qml.Identity(self.wires[0])
elif (len(self.non_identity_obs) == 1):
obs = self.non_identity_obs[0]
else:
obs = Tensor(*self.non_identity_obs)
obs.return_type = self.return_type
return obs
|
def heisenberg_expand(self, U, wires):
'Expand the given local Heisenberg-picture array into a full-system one.\n\n Args:\n U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)\n wires (Wires): wires on the device the array ``U`` should be expanded\n to apply to\n\n Raises:\n ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect\n\n Returns:\n array[float]: expanded array, dimension ``1+2*num_wires``\n '
U_dim = len(U)
nw = len(self.wires)
if (U.ndim > 2):
raise ValueError('Only order-1 and order-2 arrays supported.')
if (U_dim != (1 + (2 * nw))):
raise ValueError('{}: Heisenberg matrix is the wrong size {}.'.format(self.name, U_dim))
if ((len(wires) == 0) or (len(self.wires) == len(wires))):
return U
if (not wires.contains_wires(self.wires)):
raise ValueError('{}: Some observable wires {} do not exist on this device with wires {}'.format(self.name, self.wires, wires))
wire_indices = wires.indices(self.wires)
dim = (1 + (len(wires) * 2))
def loc(w):
'Returns the slice denoting the location of (x_w, p_w) in the basis.'
ind = ((2 * w) + 1)
return slice(ind, (ind + 2))
if (U.ndim == 1):
W = np.zeros(dim)
W[0] = U[0]
for (k, w) in enumerate(wire_indices):
W[loc(w)] = U[loc(k)]
elif (U.ndim == 2):
if isinstance(self, Observable):
W = np.zeros((dim, dim))
else:
W = np.eye(dim)
W[(0, 0)] = U[(0, 0)]
for (k1, w1) in enumerate(wire_indices):
s1 = loc(k1)
d1 = loc(w1)
W[(d1, 0)] = U[(s1, 0)]
W[(0, d1)] = U[(0, s1)]
for (k2, w2) in enumerate(wire_indices):
W[(d1, loc(w2))] = U[(s1, loc(k2))]
return W
| -4,894,675,324,531,311,000
|
Expand the given local Heisenberg-picture array into a full-system one.
Args:
U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)
wires (Wires): wires on the device the array ``U`` should be expanded
to apply to
Raises:
ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect
Returns:
array[float]: expanded array, dimension ``1+2*num_wires``
|
pennylane/operation.py
|
heisenberg_expand
|
DanielPolatajko/pennylane
|
python
|
def heisenberg_expand(self, U, wires):
'Expand the given local Heisenberg-picture array into a full-system one.\n\n Args:\n U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)\n wires (Wires): wires on the device the array ``U`` should be expanded\n to apply to\n\n Raises:\n ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect\n\n Returns:\n array[float]: expanded array, dimension ``1+2*num_wires``\n '
U_dim = len(U)
nw = len(self.wires)
if (U.ndim > 2):
raise ValueError('Only order-1 and order-2 arrays supported.')
if (U_dim != (1 + (2 * nw))):
raise ValueError('{}: Heisenberg matrix is the wrong size {}.'.format(self.name, U_dim))
if ((len(wires) == 0) or (len(self.wires) == len(wires))):
return U
if (not wires.contains_wires(self.wires)):
raise ValueError('{}: Some observable wires {} do not exist on this device with wires {}'.format(self.name, self.wires, wires))
wire_indices = wires.indices(self.wires)
dim = (1 + (len(wires) * 2))
def loc(w):
'Returns the slice denoting the location of (x_w, p_w) in the basis.'
ind = ((2 * w) + 1)
return slice(ind, (ind + 2))
if (U.ndim == 1):
W = np.zeros(dim)
W[0] = U[0]
for (k, w) in enumerate(wire_indices):
W[loc(w)] = U[loc(k)]
elif (U.ndim == 2):
if isinstance(self, Observable):
W = np.zeros((dim, dim))
else:
W = np.eye(dim)
W[(0, 0)] = U[(0, 0)]
for (k1, w1) in enumerate(wire_indices):
s1 = loc(k1)
d1 = loc(w1)
W[(d1, 0)] = U[(s1, 0)]
W[(0, d1)] = U[(0, s1)]
for (k2, w2) in enumerate(wire_indices):
W[(d1, loc(w2))] = U[(s1, loc(k2))]
return W
|
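The index bookkeeping in `heisenberg_expand` is easiest to see on the order-1 branch. Below is a standalone numpy re-enactment with made-up values (the real method lives on CV operations):

```python
import numpy as np

def loc(w):
    """Slice selecting the (x_w, p_w) pair in the (I, x_0, p_0, x_1, p_1, ...) basis."""
    ind = 2 * w + 1
    return slice(ind, ind + 2)

U = np.array([0.5, 1.0, 2.0])  # local (I, x, p) coefficients of a one-mode observable
wire_indices = [1]             # the observable's mode sits at device index 1 of 2
W = np.zeros(1 + 2 * 2)        # expanded vector for a two-mode device
W[0] = U[0]
for k, w in enumerate(wire_indices):
    W[loc(w)] = U[loc(k)]
print(W)  # [0.5 0.  0.  1.  2. ] -- the (x, p) coefficients land in mode 1's slots
```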
@staticmethod
def _heisenberg_rep(p):
"Heisenberg picture representation of the operation.\n\n * For Gaussian CV gates, this method returns the matrix of the linear\n transformation carried out by the gate for the given parameter values.\n The method is not defined for non-Gaussian gates.\n\n **The existence of this method is equivalent to setting** ``grad_method = 'A'``.\n\n * For observables, returns a real vector (first-order observables) or\n symmetric matrix (second-order observables) of expansion coefficients\n of the observable.\n\n For single-mode Operations we use the basis :math:`\\mathbf{r} = (\\I, \\x, \\p)`.\n For multi-mode Operations we use the basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n .. note::\n\n For gates, we assume that the inverse transformation is obtained\n by negating the first parameter.\n\n Args:\n p (Sequence[float]): parameter values for the transformation\n\n Returns:\n array[float]: :math:`\\tilde{U}` or :math:`q`\n "
return None
| 7,129,213,776,502,749,000
|
Heisenberg picture representation of the operation.
* For Gaussian CV gates, this method returns the matrix of the linear
transformation carried out by the gate for the given parameter values.
The method is not defined for non-Gaussian gates.
**The existence of this method is equivalent to setting** ``grad_method = 'A'``.
* For observables, returns a real vector (first-order observables) or
symmetric matrix (second-order observables) of expansion coefficients
of the observable.
For single-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x, \p)`.
For multi-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
.. note::
For gates, we assume that the inverse transformation is obtained
by negating the first parameter.
Args:
p (Sequence[float]): parameter values for the transformation
Returns:
array[float]: :math:`\tilde{U}` or :math:`q`
|
pennylane/operation.py
|
_heisenberg_rep
|
DanielPolatajko/pennylane
|
python
|
@staticmethod
def _heisenberg_rep(p):
"Heisenberg picture representation of the operation.\n\n * For Gaussian CV gates, this method returns the matrix of the linear\n transformation carried out by the gate for the given parameter values.\n The method is not defined for non-Gaussian gates.\n\n **The existence of this method is equivalent to setting** ``grad_method = 'A'``.\n\n * For observables, returns a real vector (first-order observables) or\n symmetric matrix (second-order observables) of expansion coefficients\n of the observable.\n\n For single-mode Operations we use the basis :math:`\\mathbf{r} = (\\I, \\x, \\p)`.\n For multi-mode Operations we use the basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n .. note::\n\n For gates, we assume that the inverse transformation is obtained\n by negating the first parameter.\n\n Args:\n p (Sequence[float]): parameter values for the transformation\n\n Returns:\n array[float]: :math:`\\tilde{U}` or :math:`q`\n "
return None
|
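As a concrete (illustrative) instance, a single-mode phase-space rotation has a Heisenberg representation of the following form in the :math:`(\I, \x, \p)` basis; PennyLane's Gaussian gates override `_heisenberg_rep` with matrices of this kind:

```python
import numpy as np

def rotation_heisenberg(p):
    """Heisenberg matrix of a phase-space rotation by p[0] (sketch, not the library code)."""
    c, s = np.cos(p[0]), np.sin(p[0])
    return np.array([
        [1, 0,  0],   # identity component passes through
        [0, c, -s],   # x -> cos(phi) x - sin(phi) p
        [0, s,  c],   # p -> sin(phi) x + cos(phi) p
    ])
```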
@classproperty
def supports_heisenberg(self):
'Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`\n static method, thereby indicating that it is Gaussian and does not block the use\n of the parameter-shift differentiation method if found between the differentiated gate\n and an observable.\n '
return (CV._heisenberg_rep != self._heisenberg_rep)
| -8,325,412,333,071,810,000
|
Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`
static method, thereby indicating that it is Gaussian and does not block the use
of the parameter-shift differentiation method if found between the differentiated gate
and an observable.
|
pennylane/operation.py
|
supports_heisenberg
|
DanielPolatajko/pennylane
|
python
|
@classproperty
def supports_heisenberg(self):
'Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`\n static method, thereby indicating that it is Gaussian and does not block the use\n of the parameter-shift differentiation method if found between the differentiated gate\n and an observable.\n '
return (CV._heisenberg_rep != self._heisenberg_rep)
|
@classproperty
def supports_parameter_shift(self):
"Returns True iff the CV Operation supports the parameter-shift differentiation method.\n This means that it has ``grad_method='A'`` and\n has overridden the :meth:`~.CV._heisenberg_rep` static method.\n "
return ((self.grad_method == 'A') and self.supports_heisenberg)
| 879,775,891,246,995,800
|
Returns True iff the CV Operation supports the parameter-shift differentiation method.
This means that it has ``grad_method='A'`` and
has overridden the :meth:`~.CV._heisenberg_rep` static method.
|
pennylane/operation.py
|
supports_parameter_shift
|
DanielPolatajko/pennylane
|
python
|
@classproperty
def supports_parameter_shift(self):
"Returns True iff the CV Operation supports the parameter-shift differentiation method.\n This means that it has ``grad_method='A'`` and\n has overridden the :meth:`~.CV._heisenberg_rep` static method.\n "
return ((self.grad_method == 'A') and self.supports_heisenberg)
|
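Both class properties can be queried without instantiating the operation (sketch, assuming PennyLane's CV gates behave as described):

```python
import pennylane as qml

print(qml.Rotation.supports_heisenberg)       # True:  Gaussian, overrides _heisenberg_rep
print(qml.Rotation.supports_parameter_shift)  # True:  also has grad_method == 'A'
print(qml.Kerr.supports_heisenberg)           # False: non-Gaussian gate
```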
def heisenberg_pd(self, idx):
'Partial derivative of the Heisenberg picture transform matrix.\n\n Computed using grad_recipe.\n\n Args:\n idx (int): index of the parameter with respect to which the\n partial derivative is computed.\n Returns:\n array[float]: partial derivative\n '
recipe = self.grad_recipe[idx]
multiplier = 0.5
a = 1
shift = (np.pi / 2)
default_param_shift = [[multiplier, a, shift], [(- multiplier), a, (- shift)]]
param_shift = (default_param_shift if (recipe is None) else recipe)
pd = None
p = self.parameters
original_p_idx = p[idx]
for (c, _a, s) in param_shift:
p[idx] = ((_a * original_p_idx) + s)
U = self._heisenberg_rep(p)
if (pd is None):
pd = (c * U)
else:
pd += (c * U)
return pd
| -5,387,619,469,353,053,000
|
Partial derivative of the Heisenberg picture transform matrix.
Computed using grad_recipe.
Args:
idx (int): index of the parameter with respect to which the
partial derivative is computed.
Returns:
array[float]: partial derivative
|
pennylane/operation.py
|
heisenberg_pd
|
DanielPolatajko/pennylane
|
python
|
def heisenberg_pd(self, idx):
'Partial derivative of the Heisenberg picture transform matrix.\n\n Computed using grad_recipe.\n\n Args:\n idx (int): index of the parameter with respect to which the\n partial derivative is computed.\n Returns:\n array[float]: partial derivative\n '
recipe = self.grad_recipe[idx]
multiplier = 0.5
a = 1
shift = (np.pi / 2)
default_param_shift = [[multiplier, a, shift], [(- multiplier), a, (- shift)]]
param_shift = (default_param_shift if (recipe is None) else recipe)
pd = None
p = self.parameters
original_p_idx = p[idx]
for (c, _a, s) in param_shift:
p[idx] = ((_a * original_p_idx) + s)
U = self._heisenberg_rep(p)
if (pd is None):
pd = (c * U)
else:
pd += (c * U)
return pd
|
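For Gaussian gates whose Heisenberg matrix is trigonometric in the parameter, the default two-term recipe `[0.5, 1, pi/2]` above is exact. A self-contained check against an illustrative rotation-style representation:

```python
import numpy as np

def rot(p):
    # single-mode rotation in the (I, x, p) basis -- illustrative Gaussian rep
    c, s = np.cos(p[0]), np.sin(p[0])
    return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])

phi, shift = 0.3, np.pi / 2
# default rule: 0.5 * U(phi + pi/2) - 0.5 * U(phi - pi/2)
pd = 0.5 * rot([phi + shift]) - 0.5 * rot([phi - shift])
c, s = np.cos(phi), np.sin(phi)
assert np.allclose(pd, [[0, 0, 0], [0, -s, -c], [0, c, -s]])  # exact d(rot)/d(phi)
```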
def heisenberg_tr(self, wires, inverse=False):
'Heisenberg picture representation of the linear transformation carried\n out by the gate at current parameter values.\n\n Given a unitary quantum gate :math:`U`, we may consider its linear\n transformation in the Heisenberg picture, :math:`U^\\dagger(\\cdot) U`.\n\n If the gate is Gaussian, this linear transformation preserves the polynomial order\n of any observables that are polynomials in :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n This also means it maps :math:`\\text{span}(\\mathbf{r})` into itself:\n\n .. math:: U^\\dagger \\mathbf{r}_i U = \\sum_j \\tilde{U}_{ij} \\mathbf{r}_j\n\n For Gaussian CV gates, this method returns the transformation matrix for\n the current parameter values of the Operation. The method is not defined\n for non-Gaussian (and non-CV) gates.\n\n Args:\n wires (Wires): wires on the device that the observable gets applied to\n inverse (bool): if True, return the inverse transformation instead\n\n Raises:\n RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method\n\n Returns:\n array[float]: :math:`\\tilde{U}`, the Heisenberg picture representation of the linear transformation\n '
p = self.parameters
if inverse:
if (self.par_domain == 'A'):
p[0] = np.linalg.inv(p[0])
else:
p[0] = (- p[0])
U = self._heisenberg_rep(p)
if (U is None):
raise RuntimeError('{} is not a Gaussian operation, or is missing the _heisenberg_rep method.'.format(self.name))
return self.heisenberg_expand(U, wires)
| -4,233,527,887,951,043,000
|
Heisenberg picture representation of the linear transformation carried
out by the gate at current parameter values.
Given a unitary quantum gate :math:`U`, we may consider its linear
transformation in the Heisenberg picture, :math:`U^\dagger(\cdot) U`.
If the gate is Gaussian, this linear transformation preserves the polynomial order
of any observables that are polynomials in :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
This also means it maps :math:`\text{span}(\mathbf{r})` into itself:
.. math:: U^\dagger \mathbf{r}_i U = \sum_j \tilde{U}_{ij} \mathbf{r}_j
For Gaussian CV gates, this method returns the transformation matrix for
the current parameter values of the Operation. The method is not defined
for non-Gaussian (and non-CV) gates.
Args:
wires (Wires): wires on the device that the observable gets applied to
inverse (bool): if True, return the inverse transformation instead
Raises:
RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method
Returns:
array[float]: :math:`\tilde{U}`, the Heisenberg picture representation of the linear transformation
|
pennylane/operation.py
|
heisenberg_tr
|
DanielPolatajko/pennylane
|
python
|
def heisenberg_tr(self, wires, inverse=False):
'Heisenberg picture representation of the linear transformation carried\n out by the gate at current parameter values.\n\n Given a unitary quantum gate :math:`U`, we may consider its linear\n transformation in the Heisenberg picture, :math:`U^\\dagger(\\cdot) U`.\n\n If the gate is Gaussian, this linear transformation preserves the polynomial order\n of any observables that are polynomials in :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n This also means it maps :math:`\\text{span}(\\mathbf{r})` into itself:\n\n .. math:: U^\\dagger \\mathbf{r}_i U = \\sum_j \\tilde{U}_{ij} \\mathbf{r}_j\n\n For Gaussian CV gates, this method returns the transformation matrix for\n the current parameter values of the Operation. The method is not defined\n for non-Gaussian (and non-CV) gates.\n\n Args:\n wires (Wires): wires on the device that the observable gets applied to\n inverse (bool): if True, return the inverse transformation instead\n\n Raises:\n RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method\n\n Returns:\n array[float]: :math:`\\tilde{U}`, the Heisenberg picture representation of the linear transformation\n '
p = self.parameters
if inverse:
if (self.par_domain == 'A'):
p[0] = np.linalg.inv(p[0])
else:
p[0] = (- p[0])
U = self._heisenberg_rep(p)
if (U is None):
raise RuntimeError('{} is not a Gaussian operation, or is missing the _heisenberg_rep method.'.format(self.name))
return self.heisenberg_expand(U, wires)
|
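Putting the pieces together (sketch; assumes this snapshot's `Wires` helper is importable as shown):

```python
import pennylane as qml
from pennylane.wires import Wires

op = qml.Rotation(0.3, wires=0)
U = op.heisenberg_tr(Wires([0]))
print(U.shape)  # (3, 3): the (I, x, p) basis of a single mode
U_inv = op.heisenberg_tr(Wires([0]), inverse=True)  # first parameter negated internally
```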
def heisenberg_obs(self, wires):
'Representation of the observable in the position/momentum operator basis.\n\n Returns the expansion :math:`q` of the observable, :math:`Q`, in the\n basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n * For first-order observables returns a real vector such\n that :math:`Q = \\sum_i q_i \\mathbf{r}_i`.\n\n * For second-order observables returns a real symmetric matrix\n such that :math:`Q = \\sum_{ij} q_{ij} \\mathbf{r}_i \\mathbf{r}_j`.\n\n Args:\n wires (Wires): wires on the device that the observable gets applied to\n Returns:\n array[float]: :math:`q`\n '
p = self.parameters
U = self._heisenberg_rep(p)
return self.heisenberg_expand(U, wires)
| -4,023,639,562,349,603,300
|
Representation of the observable in the position/momentum operator basis.
Returns the expansion :math:`q` of the observable, :math:`Q`, in the
basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
* For first-order observables returns a real vector such
that :math:`Q = \sum_i q_i \mathbf{r}_i`.
* For second-order observables returns a real symmetric matrix
such that :math:`Q = \sum_{ij} q_{ij} \mathbf{r}_i \mathbf{r}_j`.
Args:
wires (Wires): wires on the device that the observable gets applied to
Returns:
array[float]: :math:`q`
|
pennylane/operation.py
|
heisenberg_obs
|
DanielPolatajko/pennylane
|
python
|
def heisenberg_obs(self, wires):
'Representation of the observable in the position/momentum operator basis.\n\n Returns the expansion :math:`q` of the observable, :math:`Q`, in the\n basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n * For first-order observables returns a real vector such\n that :math:`Q = \\sum_i q_i \\mathbf{r}_i`.\n\n * For second-order observables returns a real symmetric matrix\n such that :math:`Q = \\sum_{ij} q_{ij} \\mathbf{r}_i \\mathbf{r}_j`.\n\n Args:\n wires (Wires): wires on the device that the observable gets applied to\n Returns:\n array[float]: :math:`q`\n '
p = self.parameters
U = self._heisenberg_rep(p)
return self.heisenberg_expand(U, wires)
|
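For a first-order quadrature observable the expansion vector comes out directly (sketch, assuming `qml.X` is the x-quadrature observable):

```python
import pennylane as qml
from pennylane.wires import Wires

# On a two-mode device, Q = x_0 expands as q = (0, 1, 0, 0, 0) in (I, x_0, p_0, x_1, p_1).
q = qml.X(0).heisenberg_obs(Wires([0, 1]))
print(q)  # [0. 1. 0. 0. 0.]
```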
def evaluate(p):
'Evaluate a single parameter.'
if isinstance(p, np.ndarray):
if (p.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in p.flat])
return temp.reshape(p.shape)
return p
if isinstance(p, list):
evaled_list = []
for arr in p:
if (arr.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in arr.flat])
evaled_list.append(temp.reshape(arr.shape))
return evaled_list
return p
if isinstance(p, Variable):
p = self.check_domain(p.val)
return p
| 5,925,395,568,560,989,000
|
Evaluate a single parameter.
|
pennylane/operation.py
|
evaluate
|
DanielPolatajko/pennylane
|
python
|
def evaluate(p):
if isinstance(p, np.ndarray):
if (p.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in p.flat])
return temp.reshape(p.shape)
return p
if isinstance(p, list):
evaled_list = []
for arr in p:
if (arr.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in arr.flat])
evaled_list.append(temp.reshape(arr.shape))
return evaled_list
return p
if isinstance(p, Variable):
p = self.check_domain(p.val)
return p
|
def loc(w):
'Returns the slice denoting the location of (x_w, p_w) in the basis.'
ind = ((2 * w) + 1)
return slice(ind, (ind + 2))
| 8,122,650,268,305,981,000
|
Returns the slice denoting the location of (x_w, p_w) in the basis.
|
pennylane/operation.py
|
loc
|
DanielPolatajko/pennylane
|
python
|
def loc(w):
ind = ((2 * w) + 1)
return slice(ind, (ind + 2))
|
def __init__(self, faces, vertexes=None):
'\n See :class:`MeshData <pyqtgraph.opengl.MeshData>` for initialization arguments.\n '
if isinstance(faces, MeshData):
self.data = faces
else:
self.data = MeshData()
self.data.setFaces(faces, vertexes)
GLGraphicsItem.__init__(self)
| 6,456,963,119,954,934,000
|
See :class:`MeshData <pyqtgraph.opengl.MeshData>` for initialization arguments.
|
pyqtgraph/opengl/items/GLMeshItem.py
|
__init__
|
robertsj/poropy
|
python
|
def __init__(self, faces, vertexes=None):
'\n \n '
if isinstance(faces, MeshData):
self.data = faces
else:
self.data = MeshData()
self.data.setFaces(faces, vertexes)
GLGraphicsItem.__init__(self)
|
def Video_AutoInit():
"This is a function that's called from the c extension code\n just before the display module is initialized"
if (MacOS and (not MacOS.WMAvailable())):
if (not sdlmain_osx.WMEnable()):
raise ImportError('Can not access the window manager. Use py2app or execute with the pythonw script.')
if (not sdlmain_osx.RunningFromBundleWithNSApplication()):
try:
default_icon_data = getResource('pygame_icon.tiff').read()
except IOError:
default_icon_data = None
except NotImplementedError:
default_icon_data = None
sdlmain_osx.InstallNSApplication(default_icon_data)
if ((os.getcwd() == '/') and (len(sys.argv) > 1)):
os.chdir(os.path.dirname(sys.argv[0]))
return True
| -1,161,491,649,498,932,500
|
This is a function that's called from the c extension code
just before the display module is initialized
|
venv/Lib/site-packages/pygame/macosx.py
|
Video_AutoInit
|
AdamaTraore75020/PYBomber
|
python
|
def Video_AutoInit():
"This is a function that's called from the c extension code\n just before the display module is initialized"
if (MacOS and (not MacOS.WMAvailable())):
if (not sdlmain_osx.WMEnable()):
raise ImportError('Can not access the window manager. Use py2app or execute with the pythonw script.')
if (not sdlmain_osx.RunningFromBundleWithNSApplication()):
try:
default_icon_data = getResource('pygame_icon.tiff').read()
except IOError:
default_icon_data = None
except NotImplementedError:
default_icon_data = None
sdlmain_osx.InstallNSApplication(default_icon_data)
if ((os.getcwd() == '/') and (len(sys.argv) > 1)):
os.chdir(os.path.dirname(sys.argv[0]))
return True
|
def define_parameters(self):
'\n Define the CLI arguments accepted by this plugin app.\n Use self.add_argument to specify a new app argument.\n '
self.add_argument('--executable', dest='executable', type=str, optional=True, help='the conversion program to use', default='/usr/bin/mri_convert')
self.add_argument('--inputFile', dest='inputFile', type=str, optional=True, help='the input file', default='')
self.add_argument('--outputFile', dest='outputFile', type=str, optional=True, help='the output file', default='')
self.add_argument('--execArgs', dest='execArgs', type=str, optional=True, help='additional arguments for the chosen executable', default='')
| -7,354,833,314,273,596,000
|
Define the CLI arguments accepted by this plugin app.
Use self.add_argument to specify a new app argument.
|
mri_convert_ppc64/mri_convert_ppc64.py
|
define_parameters
|
quinnyyy/pl-mri_convert_ppc64
|
python
|
def define_parameters(self):
'\n Define the CLI arguments accepted by this plugin app.\n Use self.add_argument to specify a new app argument.\n '
self.add_argument('--executable', dest='executable', type=str, optional=True, help='the conversion program to use', default='/usr/bin/mri_convert')
self.add_argument('--inputFile', dest='inputFile', type=str, optional=True, help='the input file', default='')
self.add_argument('--outputFile', dest='outputFile', type=str, optional=True, help='the output file', default='')
self.add_argument('--execArgs', dest='execArgs', type=str, optional=True, help='additional arguments for the chosen executable', default='')
|
def run(self, options):
'\n Define the code to be run by this plugin app.\n '
if (not len(options.inputFile)):
print('ERROR: No input file has been specified!')
print('You must specify an input file relative to the input directory.')
sys.exit(1)
if (not len(options.outputFile)):
print('ERROR: No output file has been specified!')
print('You must specify an output file relative to the output directory.')
sys.exit(1)
str_cmd = ('%s %s %s/%s %s/%s' % (options.executable, options.execArgs, options.inputdir, options.inputFile, options.outputdir, options.outputFile))
os.system(str_cmd)
| 5,497,002,344,144,382,000
|
Define the code to be run by this plugin app.
|
mri_convert_ppc64/mri_convert_ppc64.py
|
run
|
quinnyyy/pl-mri_convert_ppc64
|
python
|
def run(self, options):
'\n \n '
if (not len(options.inputFile)):
print('ERROR: No input file has been specified!')
print('You must specify an input file relative to the input directory.')
sys.exit(1)
if (not len(options.outputFile)):
print('ERROR: No output file has been specified!')
print('You must specify an output file relative to the output directory.')
sys.exit(1)
str_cmd = ('%s %s %s/%s %s/%s' % (options.executable, options.execArgs, options.inputdir, options.inputFile, options.outputdir, options.outputFile))
os.system(str_cmd)
|
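For concreteness, the shell command `run` assembles looks like this (all values illustrative, not from the source):

```python
opts = dict(executable='/usr/bin/mri_convert', execArgs='',
            inputdir='/incoming', inputFile='brain.mgz',
            outputdir='/outgoing', outputFile='brain.nii')
str_cmd = ('%s %s %s/%s %s/%s' % (opts['executable'], opts['execArgs'],
                                  opts['inputdir'], opts['inputFile'],
                                  opts['outputdir'], opts['outputFile']))
print(str_cmd)  # '/usr/bin/mri_convert  /incoming/brain.mgz /outgoing/brain.nii'
```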
def show_man_page(self):
"\n Print the app's man page.\n "
print(Gstr_title)
print(Gstr_synopsis)
| -1,878,531,900,290,933,500
|
Print the app's man page.
|
mri_convert_ppc64/mri_convert_ppc64.py
|
show_man_page
|
quinnyyy/pl-mri_convert_ppc64
|
python
|
def show_man_page(self):
"\n \n "
print(Gstr_title)
print(Gstr_synopsis)
|
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"\n Args:\n eval ('bool'): indicator for eval/infer.\n\n Returns:\n\n "
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if (batch_first is False):
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if (cuda is True):
x = x.cuda()
return x
seqs_x = list(map((lambda s: (([BOS] + s) + [EOS])), seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD, cuda=cuda, batch_first=batch_first)
if (seqs_y is None):
return x
seqs_y = list(map((lambda s: (([BOS] + s) + [EOS])), seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD, cuda=cuda, batch_first=batch_first)
return (x, y)
| 6,239,063,456,486,674,000
|
Args:
eval ('bool'): indicator for eval/infer.
Returns:
|
src/tasks/lm.py
|
prepare_data
|
skysky77/MGNMT
|
python
|
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"\n Args:\n eval ('bool'): indicator for eval/infer.\n\n Returns:\n\n "
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if (batch_first is False):
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if (cuda is True):
x = x.cuda()
return x
seqs_x = list(map((lambda s: (([BOS] + s) + [EOS])), seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD, cuda=cuda, batch_first=batch_first)
if (seqs_y is None):
return x
seqs_y = list(map((lambda s: (([BOS] + s) + [EOS])), seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD, cuda=cuda, batch_first=batch_first)
return (x, y)
|
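The BOS/EOS wrapping and right-padding that `prepare_data` performs boils down to the following standalone numpy sketch (token ids are assumed constants):

```python
import numpy as np

BOS, EOS, PAD = 2, 3, 0  # assumed vocabulary constants
seqs = [[5, 6], [7, 8, 9]]
seqs = [[BOS] + s + [EOS] for s in seqs]
max_size = max(len(s) for s in seqs)
x = np.full((len(seqs), max_size), PAD, dtype='int64')
for i, s in enumerate(seqs):
    x[i, :len(s)] = s
print(x)
# [[2 5 6 3 0]
#  [2 7 8 9 3]]
```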
def compute_forward(model, critic, seqs_x, eval=False, normalization=1.0, norm_by_words=False):
'\n :type model: nn.Module\n\n :type critic: NMTCriterion\n '
x_inp = seqs_x[:, :(- 1)].contiguous()
x_label = seqs_x[:, 1:].contiguous()
words_norm = x_label.ne(PAD).float().sum(1)
if (not eval):
model.train()
critic.train()
with torch.enable_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, reduce=False, normalization=normalization)
if norm_by_words:
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
with torch.no_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True)
return loss.item()
| -5,583,026,677,254,529,000
|
:type model: nn.Module
:type critic: NMTCriterion
|
src/tasks/lm.py
|
compute_forward
|
skysky77/MGNMT
|
python
|
def compute_forward(model, critic, seqs_x, eval=False, normalization=1.0, norm_by_words=False):
'\n :type model: nn.Module\n\n :type critic: NMTCriterion\n '
x_inp = seqs_x[:, :(- 1)].contiguous()
x_label = seqs_x[:, 1:].contiguous()
words_norm = x_label.ne(PAD).float().sum(1)
if (not eval):
model.train()
critic.train()
with torch.enable_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, reduce=False, normalization=normalization)
if norm_by_words:
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
with torch.no_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True)
return loss.item()
|
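The input/label shift `compute_forward` applies for next-token prediction, shown on a toy batch (illustrative ids):

```python
import torch

seqs_x = torch.tensor([[2, 5, 6, 3]])  # BOS, w1, w2, EOS
x_inp = seqs_x[:, :-1].contiguous()    # [[2, 5, 6]] -- model input
x_label = seqs_x[:, 1:].contiguous()   # [[5, 6, 3]] -- shifted targets
```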
def loss_validation(model, critic, valid_iterator):
'\n :type model: Transformer\n\n :type critic: NMTCriterion\n\n :type valid_iterator: DataIterator\n '
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
(_, seqs_x) = batch
n_sents += len(seqs_x)
n_tokens += sum((len(s) for s in seqs_x))
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model, critic=critic, seqs_x=x, eval=True)
if np.isnan(loss):
WARN('NaN detected!')
sum_loss += float(loss)
return float((sum_loss / n_sents))
| -1,263,063,538,261,875,700
|
:type model: Transformer
:type critic: NMTCriterion
:type valid_iterator: DataIterator
|
src/tasks/lm.py
|
loss_validation
|
skysky77/MGNMT
|
python
|
def loss_validation(model, critic, valid_iterator):
'\n :type model: Transformer\n\n :type critic: NMTCriterion\n\n :type valid_iterator: DataIterator\n '
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
(_, seqs_x) = batch
n_sents += len(seqs_x)
n_tokens += sum((len(s) for s in seqs_x))
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model, critic=critic, seqs_x=x, eval=True)
if np.isnan(loss):
WARN('NaN detected!')
sum_loss += float(loss)
return float((sum_loss / n_sents))
|
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
"\n Args:\n nmt_model: model.\n pretrain_path ('str'): path to pretrained model.\n map_dict ('dict'): mapping specific parameter names to those names\n in current model.\n exclude_prefix ('dict'): excluding parameters with specific names\n for pretraining.\n\n Raises:\n ValueError: Size not match, parameter name not match or others.\n\n "
if (exclude_prefix is None):
exclude_prefix = []
if (pretrain_path != ''):
INFO('Loading pretrained model from {}'.format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for (name, params) in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO('Loading param: {}...'.format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN('{}: {}'.format(str(Exception), e))
INFO('Pretrained model loaded.')
| 2,162,646,821,228,752,000
|
Args:
nmt_model: model.
pretrain_path ('str'): path to pretrained model.
map_dict ('dict'): mapping specific parameter names to those names
in current model.
exclude_prefix ('dict'): excluding parameters with specific names
for pretraining.
Raises:
ValueError: Size not match, parameter name not match or others.
|
src/tasks/lm.py
|
load_pretrained_model
|
skysky77/MGNMT
|
python
|
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
"\n Args:\n nmt_model: model.\n pretrain_path ('str'): path to pretrained model.\n map_dict ('dict'): mapping specific parameter names to those names\n in current model.\n exclude_prefix ('dict'): excluding parameters with specific names\n for pretraining.\n\n Raises:\n ValueError: Size not match, parameter name not match or others.\n\n "
if (exclude_prefix is None):
exclude_prefix = []
if (pretrain_path != ''):
INFO('Loading pretrained model from {}'.format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for (name, params) in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO('Loading param: {}...'.format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN('{}: {}'.format(str(Exception), e))
INFO('Pretrained model loaded.')
|
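A hypothetical call that skips embedding weights via `exclude_prefix` (names illustrative, not from the source):

```python
# Load every pretrained tensor except those whose name starts with 'embeddings'.
load_pretrained_model(nmt_model, pretrain_path='save/lm.best.final',
                      device='cpu', exclude_prefix=['embeddings'])
```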
def train(FLAGS):
'\n FLAGS:\n saveto: str\n reload: store_true\n config_path: str\n pretrain_path: str, default=""\n model_name: str\n log_path: str\n '
write_log_to_file(os.path.join(FLAGS.log_path, ('%s.log' % time.strftime('%Y%m%d-%H%M%S'))))
GlobalNames.USE_GPU = FLAGS.use_gpu
if GlobalNames.USE_GPU:
CURRENT_DEVICE = 'cuda:0'
else:
CURRENT_DEVICE = 'cpu'
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
INFO(pretty_configs(configs))
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, (FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX))
timer = Timer()
INFO('Loading data...')
timer.tic()
vocab_src = Vocabulary(**data_configs['vocabularies'][0])
train_batch_size = (training_configs['batch_size'] * max(1, training_configs['update_cycle']))
train_buffer_size = (training_configs['buffer_size'] * max(1, training_configs['update_cycle']))
train_bitext_dataset = ZipDataset(TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0]), shuffle=training_configs['shuffle'])
valid_bitext_dataset = ZipDataset(TextLineDataset(data_path=data_configs['valid_data'][0], vocabulary=vocab_src))
training_iterator = DataIterator(dataset=train_bitext_dataset, batch_size=train_batch_size, use_bucket=training_configs['use_bucket'], buffer_size=train_buffer_size, batching_func=training_configs['batching_key'])
valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=training_configs['valid_batch_size'], use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
model_collections = Collections()
checkpoint_saver = Saver(save_prefix='{0}.ckpt'.format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_checkpoints'])
best_model_saver = BestKSaver(save_prefix='{0}.best'.format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_best_checkpoints'])
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for (n, p) in nmt_model.named_parameters()])
params_wo_embedding = sum([p.numel() for (n, p) in nmt_model.named_parameters() if (n.find('embedding') == (- 1))])
INFO('Total parameters: {}'.format(params_total))
INFO('Total parameters (excluding word embeddings): {}'.format(params_wo_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'])
if (optimizer_configs['schedule_method'] is not None):
if (optimizer_configs['schedule_method'] == 'loss'):
scheduler = ReduceOnPlateauScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif (optimizer_configs['schedule_method'] == 'noam'):
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif (optimizer_configs['schedule_method'] == 'rsqrt'):
scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN('Unknown scheduler name {0}. Do not use lr_scheduling.'.format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
if (training_configs['moving_average_method'] is not None):
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'], named_params=nmt_model.named_parameters(), alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma)
eidx = model_collections.get_collection('eidx', [0])[(- 1)]
uidx = model_collections.get_collection('uidx', [0])[(- 1)]
bad_count = model_collections.get_collection('bad_count', [0])[(- 1)]
oom_count = model_collections.get_collection('oom_count', [0])[(- 1)]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf')
saving_files = []
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar('Epoch', (eidx + 1), uidx)
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx), total=len(training_iterator), unit='sents')
for batch in training_iter:
uidx += 1
if ((optimizer_configs['schedule_method'] is not None) and (optimizer_configs['schedule_method'] != 'loss')):
scheduler.step(global_step=uidx)
seqs_x = batch
n_samples_t = len(seqs_x)
n_words_t = sum((len(s) for s in seqs_x))
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.0
optim.zero_grad()
try:
for (seqs_x_t,) in split_shard(seqs_x, split_size=training_configs['update_cycle']):
x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model, critic=critic, seqs_x=x, eval=False, normalization=n_samples_t, norm_by_words=training_configs['norm_by_words'])
train_loss += (loss / x.size(1))
optim.step()
except RuntimeError as e:
if ('out of memory' in str(e)):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ((ma is not None) and (eidx >= training_configs['moving_average_start_epoch'])):
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str('TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar('train_loss', scalar_value=train_loss, global_step=uidx)
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
words_per_sec = (cum_words / timer.toc(return_seconds=True))
sents_per_sec = (cum_samples / timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar('Speed(words/sec)', scalar_value=words_per_sec, global_step=uidx)
summary_writer.add_scalar('Speed(sents/sec)', scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar('lrate', scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar('oom_count', scalar_value=oom_count, global_step=uidx)
timer.tic()
cum_words = 0
cum_samples = 0
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=FLAGS.debug):
if (ma is not None):
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_loss = loss_validation(model=nmt_model, critic=critic, valid_iterator=valid_iterator)
model_collections.add_to_collection('history_losses', valid_loss)
min_history_loss = np.array(model_collections.get_collection('history_losses')).min()
summary_writer.add_scalar('loss', valid_loss, global_step=uidx)
summary_writer.add_scalar('best_loss', min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if (ma is not None):
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if (optimizer_configs['schedule_method'] == 'loss'):
scheduler.step(global_step=uidx, metric=best_valid_loss)
if (valid_loss <= best_valid_loss):
bad_count = 0
if (is_early_stop is False):
torch.save(nmt_model.state_dict(), (best_model_prefix + '.final'))
model_collections.add_to_collection('uidx', uidx)
model_collections.add_to_collection('eidx', eidx)
model_collections.add_to_collection('bad_count', bad_count)
best_model_saver.save(global_step=uidx, metric=valid_loss, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma)
else:
bad_count += 1
if ((bad_count >= training_configs['early_stop_patience']) and (eidx > 0)):
is_early_stop = True
WARN('Early Stop!')
summary_writer.add_scalar('bad_count', bad_count, uidx)
INFO('{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}'.format(uidx, valid_loss, lrate, bad_count))
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection('uidx', uidx)
model_collections.add_to_collection('eidx', eidx)
model_collections.add_to_collection('bad_count', bad_count)
if (not is_early_stop):
checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma)
training_progress_bar.close()
eidx += 1
if (eidx > training_configs['max_epochs']):
break
| 350,397,659,040,292,100
|
FLAGS:
saveto: str
reload: store_true
config_path: str
pretrain_path: str, default=""
model_name: str
log_path: str
|
src/tasks/lm.py
|
train
|
skysky77/MGNMT
|
python
|
def train(FLAGS):
'\n FLAGS:\n saveto: str\n reload: store_true\n config_path: str\n pretrain_path: str, default=""\n model_name: str\n log_path: str\n '
write_log_to_file(os.path.join(FLAGS.log_path, ('%s.log' % time.strftime('%Y%m%d-%H%M%S'))))
GlobalNames.USE_GPU = FLAGS.use_gpu
if GlobalNames.USE_GPU:
CURRENT_DEVICE = 'cuda:0'
else:
CURRENT_DEVICE = 'cpu'
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
INFO(pretty_configs(configs))
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, (FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX))
timer = Timer()
INFO('Loading data...')
timer.tic()
vocab_src = Vocabulary(**data_configs['vocabularies'][0])
train_batch_size = (training_configs['batch_size'] * max(1, training_configs['update_cycle']))
train_buffer_size = (training_configs['buffer_size'] * max(1, training_configs['update_cycle']))
train_bitext_dataset = ZipDataset(TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0]), shuffle=training_configs['shuffle'])
valid_bitext_dataset = ZipDataset(TextLineDataset(data_path=data_configs['valid_data'][0], vocabulary=vocab_src))
training_iterator = DataIterator(dataset=train_bitext_dataset, batch_size=train_batch_size, use_bucket=training_configs['use_bucket'], buffer_size=train_buffer_size, batching_func=training_configs['batching_key'])
valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=training_configs['valid_batch_size'], use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
model_collections = Collections()
checkpoint_saver = Saver(save_prefix='{0}.ckpt'.format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_checkpoints'])
best_model_saver = BestKSaver(save_prefix='{0}.best'.format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_best_checkpoints'])
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for (n, p) in nmt_model.named_parameters()])
params_wo_embedding = sum([p.numel() for (n, p) in nmt_model.named_parameters() if (n.find('embedding') == (- 1))])
INFO('Total parameters: {}'.format(params_total))
INFO('Total parameters (excluding word embeddings): {}'.format(params_wo_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'])
if (optimizer_configs['schedule_method'] is not None):
if (optimizer_configs['schedule_method'] == 'loss'):
scheduler = ReduceOnPlateauScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif (optimizer_configs['schedule_method'] == 'noam'):
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif (optimizer_configs['schedule_method'] == 'rsqrt'):
scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN('Unknown scheduler name {0}. Do not use lr_scheduling.'.format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
if (training_configs['moving_average_method'] is not None):
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'], named_params=nmt_model.named_parameters(), alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma)
eidx = model_collections.get_collection('eidx', [0])[(- 1)]
uidx = model_collections.get_collection('uidx', [0])[(- 1)]
bad_count = model_collections.get_collection('bad_count', [0])[(- 1)]
oom_count = model_collections.get_collection('oom_count', [0])[(- 1)]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf')
saving_files = []
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar('Epoch', (eidx + 1), uidx)
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx), total=len(training_iterator), unit='sents')
for batch in training_iter:
uidx += 1
if ((optimizer_configs['schedule_method'] is not None) and (optimizer_configs['schedule_method'] != 'loss')):
scheduler.step(global_step=uidx)
seqs_x = batch
n_samples_t = len(seqs_x)
n_words_t = sum((len(s) for s in seqs_x))
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.0
optim.zero_grad()
try:
for (seqs_x_t,) in split_shard(seqs_x, split_size=training_configs['update_cycle']):
x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model, critic=critic, seqs_x=x, eval=False, normalization=n_samples_t, norm_by_words=training_configs['norm_by_words'])
train_loss += (loss / x.size(1))
optim.step()
except RuntimeError as e:
if ('out of memory' in str(e)):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ((ma is not None) and (eidx >= training_configs['moving_average_start_epoch'])):
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str('TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar('train_loss', scalar_value=train_loss, global_step=uidx)
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
words_per_sec = (cum_words / timer.toc(return_seconds=True))
sents_per_sec = (cum_samples / timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar('Speed(words/sec)', scalar_value=words_per_sec, global_step=uidx)
summary_writer.add_scalar('Speed(sents/sec)', scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar('lrate', scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar('oom_count', scalar_value=oom_count, global_step=uidx)
timer.tic()
cum_words = 0
cum_samples = 0
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=FLAGS.debug):
if (ma is not None):
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_loss = loss_validation(model=nmt_model, critic=critic, valid_iterator=valid_iterator)
model_collections.add_to_collection('history_losses', valid_loss)
min_history_loss = np.array(model_collections.get_collection('history_losses')).min()
summary_writer.add_scalar('loss', valid_loss, global_step=uidx)
summary_writer.add_scalar('best_loss', min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if (ma is not None):
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if (optimizer_configs['schedule_method'] == 'loss'):
scheduler.step(global_step=uidx, metric=best_valid_loss)
if (valid_loss <= best_valid_loss):
bad_count = 0
if (is_early_stop is False):
torch.save(nmt_model.state_dict(), (best_model_prefix + '.final'))
model_collections.add_to_collection('uidx', uidx)
model_collections.add_to_collection('eidx', eidx)
model_collections.add_to_collection('bad_count', bad_count)
best_model_saver.save(global_step=uidx, metric=valid_loss, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma)
else:
bad_count += 1
if ((bad_count >= training_configs['early_stop_patience']) and (eidx > 0)):
is_early_stop = True
WARN('Early Stop!')
summary_writer.add_scalar('bad_count', bad_count, uidx)
INFO('{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}'.format(uidx, valid_loss, lrate, bad_count))
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection('uidx', uidx)
model_collections.add_to_collection('eidx', eidx)
model_collections.add_to_collection('bad_count', bad_count)
if (not is_early_stop):
checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma)
training_progress_bar.close()
eidx += 1
if (eidx > training_configs['max_epochs']):
break
|
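The training loop above implements gradient accumulation: split_shard cuts each batch into update_cycle micro-batches, gradients accumulate across them, one optim.step() applies the combined update, and a CUDA out-of-memory error skips the batch after clearing the half-accumulated gradients. A minimal sketch of the same pattern (model, criterion, and the tensor batch are schematic placeholders, not names from the code above):
```python
import torch

def train_step(model, criterion, optimizer, batch, update_cycle):
    """Accumulate gradients over `update_cycle` micro-batches, then step once."""
    optimizer.zero_grad()
    try:
        for micro in torch.chunk(batch, update_cycle):  # stand-in for split_shard
            loss = criterion(model(micro), micro)       # schematic objective
            (loss / update_cycle).backward()            # scale so the sum matches the full batch
        optimizer.step()
    except RuntimeError as e:
        if 'out of memory' in str(e):
            optimizer.zero_grad()  # drop partial gradients and skip the batch
        else:
            raise
```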
def __init__(self, *, section_proxy: Callable[([], List[SectionMsg])], lane_proxy: Callable[([int], LaneMsg)], obstacle_proxy: Callable[([int], List[LabeledPolygonMsg])], surface_marking_proxy: Callable[([int], List[LabeledPolygonMsg])], parking_proxy: Callable[([int], Any)], intersection_proxy: Callable[([int], Any)], overtaking_buffer: float=2, start_zone_buffer: float=1, end_zone_buffer: float=1.5, yield_distance: Tuple[(float, float)]=((- 0.6), (- 0.2))):
'Initialize zone speaker.\n\n Args:\n section_proxy: Returns all sections when called.\n lane_proxy: Returns a LaneMsg for each section.\n obstacle_proxy: function which returns obstacles in a section.\n surface_marking_proxy: function which returns surface_markings in a section.\n parking_proxy: function which returns parking msg in a section.\n intersection_proxy: function which returns intersection msg in a section.\n overtaking_buffer: buffer around obstacles that the car is allowed to overtake\n start_zone_buffer: beginning of the road that is considered as a start zone\n end_zone_buffer: end of the road that is considered as the end\n yield_distance: interval before intersections that the vehicle must yield in\n '
super().__init__(section_proxy=section_proxy, lane_proxy=lane_proxy, obstacle_proxy=obstacle_proxy, surface_marking_proxy=surface_marking_proxy, intersection_proxy=intersection_proxy)
self.get_parking_msgs = parking_proxy
self.overtaking_buffer = overtaking_buffer
self.start_zone_buffer = start_zone_buffer
self.end_zone_buffer = end_zone_buffer
self.yield_distance = yield_distance
self.total_length = self.middle_line.length
| 3,897,407,619,816,136,000
|
Initialize zone speaker.
Args:
section_proxy: Returns all sections when called.
lane_proxy: Returns a LaneMsg for each section.
obstacle_proxy: function which returns obstacles in a section.
surface_marking_proxy: function which returns surface_markings in a section.
parking_proxy: function which returns parking msg in a section.
intersection_proxy: function which returns intersection msg in a section.
overtaking_buffer: buffer around obstacles that the car is allowed to overtake
start_zone_buffer: beginning of the road that is considered as a start zone
end_zone_buffer: end of the road that is considered as the end
yield_distance: interval before intersections that the vehicle must yield in
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
__init__
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
def __init__(self, *, section_proxy: Callable[([], List[SectionMsg])], lane_proxy: Callable[([int], LaneMsg)], obstacle_proxy: Callable[([int], List[LabeledPolygonMsg])], surface_marking_proxy: Callable[([int], List[LabeledPolygonMsg])], parking_proxy: Callable[([int], Any)], intersection_proxy: Callable[([int], Any)], overtaking_buffer: float=2, start_zone_buffer: float=1, end_zone_buffer: float=1.5, yield_distance: Tuple[(float, float)]=((- 0.6), (- 0.2))):
'Initialize zone speaker.\n\n Args:\n section_proxy: Returns all sections when called.\n lane_proxy: Returns a LaneMsg for each section.\n obstacle_proxy: function which returns obstacles in a section.\n surface_marking_proxy: function which returns surface_markings in a section.\n parking_proxy: function which returns parking msg in a section.\n intersection_proxy: function which returns intersection msg in a section.\n overtaking_buffer: buffer around obstacles that the car is allowed to overtake\n start_zone_buffer: beginning of the road that is considered as a start zone\n end_zone_buffer: end of the road that is considered as the end\n yield_distance: interval before intersections that the vehicle must yield in\n '
super().__init__(section_proxy=section_proxy, lane_proxy=lane_proxy, obstacle_proxy=obstacle_proxy, surface_marking_proxy=surface_marking_proxy, intersection_proxy=intersection_proxy)
self.get_parking_msgs = parking_proxy
self.overtaking_buffer = overtaking_buffer
self.start_zone_buffer = start_zone_buffer
self.end_zone_buffer = end_zone_buffer
self.yield_distance = yield_distance
self.total_length = self.middle_line.length
|
@functools.cached_property
def overtaking_zones(self) -> List[Tuple[(float, float)]]:
'Intervals in which the car is allowed to overtake along the\n :py:attr:`Speaker.middle_line`.'
obstacles = list((lp.frame for sec in self.sections if (sec.type != road_section_type.PARKING_AREA) for lp in self.get_obstacles_in_section(sec.id)))
surface_markings = list((surface_marking for sec in self.sections for surface_marking in self.get_surface_markings_in_section(sec.id)))
blocked_areas = [sm.frame for sm in surface_markings if (sm.id_ == SurfaceMarking.BLOCKED_AREA[0])]
intervals = list((self.get_interval_for_polygon(obs) for obs in (obstacles + blocked_areas)))
if (len(intervals) == 0):
return []
zone_intervals = [((intervals[0][0] - self.overtaking_buffer), (intervals[0][1] + self.overtaking_buffer))]
for (start, end) in intervals[1:]:
last = zone_intervals[(- 1)]
if ((start - self.overtaking_buffer) < last[1]):
zone_intervals[(- 1)] = (last[0], (end + self.overtaking_buffer))
else:
zone_intervals.append(((start - self.overtaking_buffer), (end + self.overtaking_buffer)))
return zone_intervals
| -8,128,807,900,204,974,000
|
Intervals in which the car is allowed to overtake along the
:py:attr:`Speaker.middle_line`.
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
overtaking_zones
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
@functools.cached_property
def overtaking_zones(self) -> List[Tuple[(float, float)]]:
'Intervals in which the car is allowed to overtake along the\n :py:attr:`Speaker.middle_line`.'
obstacles = list((lp.frame for sec in self.sections if (sec.type != road_section_type.PARKING_AREA) for lp in self.get_obstacles_in_section(sec.id)))
surface_markings = list((surface_marking for sec in self.sections for surface_marking in self.get_surface_markings_in_section(sec.id)))
blocked_areas = [sm.frame for sm in surface_markings if (sm.id_ == SurfaceMarking.BLOCKED_AREA[0])]
intervals = list((self.get_interval_for_polygon(obs) for obs in (obstacles + blocked_areas)))
if (len(intervals) == 0):
return []
zone_intervals = [((intervals[0][0] - self.overtaking_buffer), (intervals[0][1] + self.overtaking_buffer))]
for (start, end) in intervals[1:]:
last = zone_intervals[(- 1)]
if ((start - self.overtaking_buffer) < last[1]):
zone_intervals[(- 1)] = (last[0], (end + self.overtaking_buffer))
else:
zone_intervals.append(((start - self.overtaking_buffer), (end + self.overtaking_buffer)))
return zone_intervals
|
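overtaking_zones pads every obstacle interval by overtaking_buffer and merges padded intervals that overlap, so nearby obstacles yield one continuous overtaking zone. The merge step in isolation (a sketch; merge_buffered is not a name from the source, and intervals are assumed sorted by start, as they are above):
```python
from typing import List, Tuple

def merge_buffered(intervals: List[Tuple[float, float]], buffer: float) -> List[Tuple[float, float]]:
    """Pad each (start, end) interval by `buffer` and merge overlapping results."""
    merged: List[Tuple[float, float]] = []
    for start, end in intervals:
        start, end = start - buffer, end + buffer
        if merged and start < merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

# Two obstacles 1 m apart fuse into one zone with a 2 m buffer:
assert merge_buffered([(0.0, 1.0), (2.0, 3.0)], buffer=2.0) == [(-2.0, 5.0)]
```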
def _intersection_yield_zones(self, rule: int) -> List[Tuple[(float, float)]]:
'Intervals in which the car is supposed to halt/stop (in front of intersections).\n\n Args:\n rule: only intersections with this rule are considered\n '
intervals = []
for sec in self.sections:
if (sec.type != road_section_type.INTERSECTION):
continue
intersection_msg = self.get_intersection(sec.id)
arc_length = self.middle_line.project(Point(intersection_msg.south.middle_line[(- 1)]))
if (intersection_msg.rule == rule):
intervals.append(((arc_length + self.yield_distance[0]), (arc_length + self.yield_distance[1])))
return intervals
| -4,008,736,530,455,372,000
|
Intervals in which the car is supposed to halt/stop (in front of intersections).
Args:
rule: only intersections with this rule are considered
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
_intersection_yield_zones
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
def _intersection_yield_zones(self, rule: int) -> List[Tuple[(float, float)]]:
'Intervals in which the car is supposed to halt/stop (in front of intersections).\n\n Args:\n rule: only intersections with this rule are considered\n '
intervals = []
for sec in self.sections:
if (sec.type != road_section_type.INTERSECTION):
continue
intersection_msg = self.get_intersection(sec.id)
arc_length = self.middle_line.project(Point(intersection_msg.south.middle_line[(- 1)]))
if (intersection_msg.rule == rule):
intervals.append(((arc_length + self.yield_distance[0]), (arc_length + self.yield_distance[1])))
return intervals
|
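_intersection_yield_zones anchors each zone at the arc length of the intersection's stop line, obtained via project, which returns the distance along a line to the point on it nearest the argument. A standalone illustration, assuming Shapely geometry (the source's middle_line exposes the same interface):
```python
from shapely.geometry import LineString, Point

middle_line = LineString([(0, 0), (10, 0)])   # a 10 m straight road
stop_point = Point(4, 1)                      # 1 m beside the road at x = 4

arc_length = middle_line.project(stop_point)  # -> 4.0
yield_distance = (-0.6, -0.2)
zone = (arc_length + yield_distance[0], arc_length + yield_distance[1])
print(zone)  # (3.4, 3.8): yield between 0.6 m and 0.2 m before the line
```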
@functools.cached_property
def stop_zones(self) -> List[Tuple[(float, float)]]:
'Intervals in which the car is supposed to stop (in front of intersections).'
return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.STOP)
| -5,583,183,039,907,304,000
|
Intervals in which the car is supposed to stop (in front of intersections).
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
stop_zones
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
@functools.cached_property
def stop_zones(self) -> List[Tuple[(float, float)]]:
return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.STOP)
|
@functools.cached_property
def halt_zones(self) -> List[Tuple[(float, float)]]:
'Intervals in which the car is supposed to halt (in front of intersections).'
return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.YIELD)
| -2,817,556,553,382,975,500
|
Intervals in which the car is supposed to halt (in front of intersections).
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
halt_zones
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
@functools.cached_property
def halt_zones(self) -> List[Tuple[(float, float)]]:
return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.YIELD)
|
def _inside_any_interval(self, intervals: List[Tuple[(float, float)]]) -> bool:
'Determine if the car is currently in any of the given intervals.'
beginnings = list((interval[0] for interval in intervals))
endings = list((interval[1] for interval in intervals))
b_idx = (bisect.bisect_left(beginnings, self.arc_length) - 1)
e_idx = (bisect.bisect_left(endings, self.arc_length) - 1)
return ((b_idx - e_idx) == 1)
| 2,748,530,073,120,588,000
|
Determine if the car is currently in any of the given intervals.
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
_inside_any_interval
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
def _inside_any_interval(self, intervals: List[Tuple[(float, float)]]) -> bool:
beginnings = list((interval[0] for interval in intervals))
endings = list((interval[1] for interval in intervals))
b_idx = (bisect.bisect_left(beginnings, self.arc_length) - 1)
e_idx = (bisect.bisect_left(endings, self.arc_length) - 1)
return ((b_idx - e_idx) == 1)
|
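_inside_any_interval relies on the intervals being sorted and disjoint: bisect_left over the starts counts how many intervals begin at or before the car's arc length, and over the ends how many have already closed; the car is inside an interval exactly when those counts differ by one. A worked example of the same test:
```python
import bisect

intervals = [(1.0, 2.0), (4.0, 6.0)]
starts = [i[0] for i in intervals]
ends = [i[1] for i in intervals]

def inside(arc_length: float) -> bool:
    b_idx = bisect.bisect_left(starts, arc_length) - 1  # intervals already started
    e_idx = bisect.bisect_left(ends, arc_length) - 1    # intervals already ended
    return b_idx - e_idx == 1

assert inside(5.0)       # within (4.0, 6.0)
assert not inside(3.0)   # in the gap between the two intervals
```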
def speak(self) -> List[SpeakerMsg]:
'List of speaker msgs.\n\n Contents:\n * beginning of road -> :ref:`Speaker <speaker_msg>`.START_ZONE,\n end of road -> :ref:`Speaker <speaker_msg>`.END_ZONE,\n and in between -> :ref:`Speaker <speaker_msg>`.DRIVING_ZONE,\n * close to an obstacle -> :ref:`Speaker <speaker_msg>`.OVERTAKING_ZONE\n * before yield/stop lines -> :ref:`Speaker <speaker_msg>`.HALT_ZONE/SpeakerMsg.STOP_ZONE,\n * parking area -> :ref:`Speaker <speaker_msg>`.PARKING_ZONE\n '
msgs = super().speak()
def append_msg(t: int):
msg = SpeakerMsg()
msg.type = t
msgs.append(msg)
append_msg((SpeakerMsg.PARKING_ZONE if (self.current_section.type == road_section_type.PARKING_AREA) else SpeakerMsg.NO_PARKING_ZONE))
append_msg((SpeakerMsg.OVERTAKING_ZONE if self._inside_any_interval(self.overtaking_zones) else SpeakerMsg.NO_OVERTAKING_ZONE))
if (self.arc_length < self.start_zone_buffer):
append_msg(SpeakerMsg.START_ZONE)
elif ((self.arc_length + self.end_zone_buffer) < self.total_length):
append_msg(SpeakerMsg.DRIVING_ZONE)
else:
append_msg(SpeakerMsg.END_ZONE)
if self._inside_any_interval(self.halt_zones):
append_msg(SpeakerMsg.HALT_ZONE)
elif self._inside_any_interval(self.stop_zones):
append_msg(SpeakerMsg.STOP_ZONE)
else:
append_msg(SpeakerMsg.NO_STOP_ZONE)
for (x, msg) in reversed(self.speed_zones):
if ((x + 0.5) < self.arc_length):
append_msg(msg)
break
return msgs
| -1,370,087,488,015,486,500
|
List of speaker msgs.
Contents:
* beginning of road -> :ref:`Speaker <speaker_msg>`.START_ZONE,
end of road -> :ref:`Speaker <speaker_msg>`.END_ZONE,
and in between -> :ref:`Speaker <speaker_msg>`.DRIVING_ZONE,
* close to an obstacle -> :ref:`Speaker <speaker_msg>`.OVERTAKING_ZONE
* before yield/stop lines -> :ref:`Speaker <speaker_msg>`.HALT_ZONE/SpeakerMsg.STOP_ZONE,
* parking area -> :ref:`Speaker <speaker_msg>`.PARKING_ZONE
|
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
|
speak
|
KITcar-Team/kitcar-gazebo-simulation
|
python
|
def speak(self) -> List[SpeakerMsg]:
'List of speaker msgs.\n\n Contents:\n * beginning of road -> :ref:`Speaker <speaker_msg>`.START_ZONE,\n end of road -> :ref:`Speaker <speaker_msg>`.END_ZONE,\n and in between -> :ref:`Speaker <speaker_msg>`.DRIVING_ZONE,\n * close to an obstacle -> :ref:`Speaker <speaker_msg>`.OVERTAKING_ZONE\n * before yield/stop lines -> :ref:`Speaker <speaker_msg>`.HALT_ZONE/SpeakerMsg.STOP_ZONE,\n * parking area -> :ref:`Speaker <speaker_msg>`.PARKING_ZONE\n '
msgs = super().speak()
def append_msg(t: int):
msg = SpeakerMsg()
msg.type = t
msgs.append(msg)
append_msg((SpeakerMsg.PARKING_ZONE if (self.current_section.type == road_section_type.PARKING_AREA) else SpeakerMsg.NO_PARKING_ZONE))
append_msg((SpeakerMsg.OVERTAKING_ZONE if self._inside_any_interval(self.overtaking_zones) else SpeakerMsg.NO_OVERTAKING_ZONE))
if (self.arc_length < self.start_zone_buffer):
append_msg(SpeakerMsg.START_ZONE)
elif ((self.arc_length + self.end_zone_buffer) < self.total_length):
append_msg(SpeakerMsg.DRIVING_ZONE)
else:
append_msg(SpeakerMsg.END_ZONE)
if self._inside_any_interval(self.halt_zones):
append_msg(SpeakerMsg.HALT_ZONE)
elif self._inside_any_interval(self.stop_zones):
append_msg(SpeakerMsg.STOP_ZONE)
else:
append_msg(SpeakerMsg.NO_STOP_ZONE)
for (x, msg) in reversed(self.speed_zones):
if ((x + 0.5) < self.arc_length):
append_msg(msg)
break
return msgs
|
def processMedlineFolder(medlineFolder, outFolder):
'Basic function that iterates through abstracts in Medline XML files, does a basic word count and saves the results to a file\n\n\tArgs:\n\t\tmedlineFolder (folder): Medline XML folder containing abstracts\n\t\toutFolder (folder): Folder to save output data to\n\tReturns:\n\t\tNothing\n\n\t'
abstractCount = 0
files = [f for f in listdir(medlineFolder) if isfile(join(medlineFolder, f))]
files = sorted([f for f in files if f.endswith('xml')])
outfile = join(outFolder, 'countWordsError.txt')
with open(outfile, 'a') as result:
for f in files:
print(('Processing %s' % f))
fullpath = join(medlineFolder, f)
for (event, elem) in etree.iterparse(fullpath, events=('start', 'end', 'start-ns', 'end-ns')):
if ((event == 'end') and (elem.tag == 'MedlineCitation')):
pmidElements = elem.findall('./PMID')
abstractElements = elem.findall('./Article/Abstract/AbstractText')
if ((len(pmidElements) != 1) or (len(abstractElements) != 1)):
continue
pmid = pmidElements[0].text
abstract = abstractElements[0].text
if (not (abstract is None)):
wordCount = len(abstract.split())
line = ('%s\t%d\n' % (pmid, wordCount))
result.write(line)
abstractCount += 1
print(('%d abstracts processed' % abstractCount))
| -3,890,652,773,604,347,000
|
Basic function that iterates through abstracts in Medline XML files, does a basic word count and saves the results to a file
Args:
medlineFolder (folder): Medline XML folder containing abstracts
outFolder (folder): Folder to save output data to
Returns:
Nothing
|
server/tools/CountWordsError/0.1/CountWordsError.py
|
processMedlineFolder
|
NCBI-Hackathons/Autoupdating_PubMed_Corpus_for_NLP
|
python
|
def processMedlineFolder(medlineFolder, outFolder):
'Basic function that iterates through abstracts in Medline XML files, does a basic word count and saves the results to a file\n\n\tArgs:\n\t\tmedlineFolder (folder): Medline XML folder containing abstracts\n\t\toutFolder (folder): Folder to save output data to\n\tReturns:\n\t\tNothing\n\n\t'
abstractCount = 0
files = [f for f in listdir(medlineFolder) if isfile(join(medlineFolder, f))]
files = sorted([f for f in files if f.endswith('xml')])
outfile = join(outFolder, 'countWordsError.txt')
with open(outfile, 'a') as result:
for f in files:
print(('Processing %s' % f))
fullpath = join(medlineFolder, f)
for (event, elem) in etree.iterparse(fullpath, events=('start', 'end', 'start-ns', 'end-ns')):
if ((event == 'end') and (elem.tag == 'MedlineCitation')):
pmidElements = elem.findall('./PMID')
abstractElements = elem.findall('./Article/Abstract/AbstractText')
if ((len(pmidElements) != 1) or (len(abstractElements) != 1)):
continue
pmid = pmidElements[0].text
abstract = abstractElements[0].text
if (not (abstract is None)):
wordCount = len(abstract.split())
line = ('%s\t%d\n' % (pmid, wordCount))
result.write(line)
abstractCount += 1
print(('%d abstracts processed' % abstractCount))
|
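processMedlineFolder streams each XML file with iterparse but never releases processed elements, so the parsed tree grows with the file. A hedged variant of the inner loop that clears each citation after use (standard-library ElementTree shown; the source's etree import is assumed compatible):
```python
from xml.etree import ElementTree as etree

def count_words_in_file(fullpath, result):
    """Stream MedlineCitation elements, writing 'pmid<TAB>wordcount' lines."""
    count = 0
    for event, elem in etree.iterparse(fullpath, events=('end',)):
        if elem.tag != 'MedlineCitation':
            continue
        pmids = elem.findall('./PMID')
        abstracts = elem.findall('./Article/Abstract/AbstractText')
        if len(pmids) == 1 and len(abstracts) == 1 and abstracts[0].text:
            result.write('%s\t%d\n' % (pmids[0].text, len(abstracts[0].text.split())))
            count += 1
        elem.clear()  # free the processed subtree so memory stays bounded
    return count
```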
def mock_match(A, B):
'\n Checks that the params of a mocked function call are as expected\n\n It is necessary because sometimes we get a tuple while the mock data\n contains lists.\n\n Examples:\n ```\n >>> mock_match("A", "A")\n True\n >>> mock_match("A", "B")\n False\n >>> mock_match(["A", "B", "C"], ["A", "B", "C"])\n True\n >>> mock_match(["A", "B", "C"], "*")\n True\n\n ```\n '
if (B == '*'):
return True
if isinstance(A, (tuple, list)):
return all((mock_match(a, b) for (a, b) in zip(A, B)))
return (A == B)
| 3,523,939,949,677,141,000
|
Checks that the params of a mocked function call are as expected
It is necessary because sometimes we get a tuple while the mock data
contains lists.
Examples:
```
>>> mock_match("A", "A")
True
>>> mock_match("A", "B")
False
>>> mock_match(["A", "B", "C"], ["A", "B", "C"])
True
>>> mock_match(["A", "B", "C"], "*")
True
```
|
smock.py
|
mock_match
|
serverboards/serverboards-plugin-google-drive
|
python
|
def mock_match(A, B):
'\n Checks that the params of a mocked function call are as expected\n\n It is necessary because sometimes we get a tuple while the mock data\n contains lists.\n\n Examples:\n ```\n >>> mock_match("A", "A")\n True\n >>> mock_match("A", "B")\n False\n >>> mock_match(["A", "B", "C"], ["A", "B", "C"])\n True\n >>> mock_match(["A", "B", "C"], "*")\n True\n\n ```\n '
if (B == '*'):
return True
if isinstance(A, (tuple, list)):
return all((mock_match(a, b) for (a, b) in zip(A, B)))
return (A == B)
|
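One caveat when extending the doctests above: zip truncates to the shorter sequence, so mock_match accepts a prefix of a list as a full match. A length check closes the gap (strict_match is an illustrative name, not part of smock.py):
```
>>> mock_match(["A", "B", "C"], ["A"])   # zip stops after the first pair
True
>>> def strict_match(A, B):
...     if B == "*":
...         return True
...     if isinstance(A, (tuple, list)):
...         return len(A) == len(B) and all(strict_match(a, b) for a, b in zip(A, B))
...     return A == B
>>> strict_match(["A", "B", "C"], ["A"])
False
```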
def mock_res(name, data, args=[], kwargs={}):
'\n Given a name, data and call parameters, returns the mocked response\n\n If there is no matching response, raises an exception that can be used to\n prepare the mock data.\n\n This can be used for situations where you mock some function like data;\n for example at [Serverboards](https://serverboards.io), we use it to\n mock RPC calls.\n\n It is also used internally by every other mocking helper.\n '
data = data.get(name)
if (not data):
raise Exception(('unknown method for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n' % (name, json.dumps(args), json.dumps(kwargs))))
for res in data:
if (mock_match(args, res.get('args')) and mock_match(kwargs, res.get('kwargs', {}))):
if ('error' in res):
raise Exception(res['error'])
response = res['response']
if isinstance(response, (int, str)):
return response
return wrapped(response)
raise Exception(('unknown data for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n' % (name, json.dumps(args), json.dumps(kwargs))))
| -572,512,122,099,329,150
|
Given a name, data and call parameters, returns the mocked response
If there is no matching response, raises an exception that can be used to
prepare the mock data.
This can be used for situations where you mock some function like data;
for example at [Serverboards](https://serverboards.io), we use it to
mock RPC calls.
It is also used internally by every other mocking helper.
|
smock.py
|
mock_res
|
serverboards/serverboards-plugin-google-drive
|
python
|
def mock_res(name, data, args=[], kwargs={}):
'\n Given a name, data and call parameters, returns the mocked response\n\n If there is no matching response, raises an exception that can be used to\n prepare the mock data.\n\n This can be used for situations where you mock some function like data;\n for example at [Serverboards](https://serverboards.io), we use it to\n mock RPC calls.\n\n It is also used internally by every other mocking helper.\n '
data = data.get(name)
if (not data):
raise Exception(('unknown method for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n' % (name, json.dumps(args), json.dumps(kwargs))))
for res in data:
if (mock_match(args, res.get('args')) and mock_match(kwargs, res.get('kwargs', {}))):
if ('error' in res):
raise Exception(res['error'])
response = res['response']
if isinstance(response, (int, str)):
return response
return wrapped(response)
raise Exception(('unknown data for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n' % (name, json.dumps(args), json.dumps(kwargs))))
|
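The data mock_res consumes maps a function name to a list of {args, kwargs, response|error} patterns, tried in order; "*" matches anything. A minimal in-memory example, assuming mock_match and mock_res from this file are importable (string responses keep us off the wrapped() path used for structured responses):
```python
data = {
    'requests.get': [
        {'args': ['https://mocked.url'], 'response': 'Mocked query'},
        {'args': ['https://boom.url'], 'error': 'connection refused'},
        {'args': '*', 'response': 'fallback'},
    ]
}

print(mock_res('requests.get', data, args=['https://mocked.url']))  # Mocked query
print(mock_res('requests.get', data, args=['https://other.url']))   # fallback
# mock_res('requests.get', data, args=['https://boom.url']) raises Exception('connection refused')
```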
def mock_method(name, data):
'\n Returns a function that mocks an original function.\n '
def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
| -4,421,090,414,963,443,700
|
Returns a function that mocks an original function.
|
smock.py
|
mock_method
|
serverboards/serverboards-plugin-google-drive
|
python
|
def mock_method(name, data):
'\n \n '
def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
|
def mock_method_async(name, data):
'\n Returns an async function that mocks an original async function\n '
async def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
| -1,206,476,654,475,070,200
|
Returns an async function that mocks an original async function
|
smock.py
|
mock_method_async
|
serverboards/serverboards-plugin-google-drive
|
python
|
def mock_method_async(name, data):
'\n \n '
async def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
|
def mock_res(self, name, args=[], kwargs={}):
'\n Calls `mock_res`\n\n Mock by args:\n ```\n >>> smock = SMock("tests/data.yaml")\n >>> res = smock.mock_res("requests.get", ["https://mocked.url"])\n >>> res.status_code\n 200\n\n ```\n\n Using "*" as args, as fallback. As there is no kwargs, use default:\n ```\n >>> res = smock.mock_res("requests.get", ["https://error.mocked.url"])\n >>> res.status_code\n 404\n\n ```\n\n Using "*" as kwargs:\n ```\n >>> res = smock.mock_res("requests.get",\n ... ["https://mocked.url"],\n ... {\'data\': \'data\'})\n >>> res.status_code\n 200\n >>> res.content\n \'Mocked query\'\n\n ```\n '
return mock_res(name, self._data, args, kwargs)
| -3,832,548,049,595,161,600
|
Calls `mock_res`
Mock by args:
```
>>> smock = SMock("tests/data.yaml")
>>> res = smock.mock_res("requests.get", ["https://mocked.url"])
>>> res.status_code
200
```
Using "*" as args, as a fallback. As there are no kwargs, the default is used:
```
>>> res = smock.mock_res("requests.get", ["https://error.mocked.url"])
>>> res.status_code
404
```
Using "*" as kwargs:
```
>>> res = smock.mock_res("requests.get",
... ["https://mocked.url"],
... {'data': 'data'})
>>> res.status_code
200
>>> res.content
'Mocked query'
```
|
smock.py
|
mock_res
|
serverboards/serverboards-plugin-google-drive
|
python
|
def mock_res(self, name, args=[], kwargs={}):
'\n Calls `mock_res`\n\n Mock by args:\n ```\n >>> smock = SMock("tests/data.yaml")\n >>> res = smock.mock_res("requests.get", ["https://mocked.url"])\n >>> res.status_code\n 200\n\n ```\n\n Using "*" as args, as fallback. As there is no kwargs, use default:\n ```\n >>> res = smock.mock_res("requests.get", ["https://error.mocked.url"])\n >>> res.status_code\n 404\n\n ```\n\n Using "*" as kwargs:\n ```\n >>> res = smock.mock_res("requests.get",\n ... ["https://mocked.url"],\n ... {\'data\': \'data\'})\n >>> res.status_code\n 200\n >>> res.content\n \'Mocked query\'\n\n ```\n '
return mock_res(name, self._data, args, kwargs)
|
def mock_method(self, name):
'\n Calls `mock_method`\n '
return mock_method(name, self._data)
| -1,218,371,837,164,396,000
|
Calls `mock_method`
|
smock.py
|
mock_method
|
serverboards/serverboards-plugin-google-drive
|
python
|
def mock_method(self, name):
'\n \n '
return mock_method(name, self._data)
|
async def mock_method_async(self, name):
'\n Calls `mock_method_async`\n '
return (await mock_method_async(name, self._data))
| -6,066,488,485,754,488,000
|
Calls `mock_method_async`
|
smock.py
|
mock_method_async
|
serverboards/serverboards-plugin-google-drive
|
python
|
async def mock_method_async(self, name):
'\n \n '
return (await mock_method_async(name, self._data))
|
def canon(smiles):
'Canonicalize SMILES for safety. If canonicalization ever changes this should remain consistent'
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
| 7,068,212,702,851,012,000
|
Canonicalize SMILES for safety. If canonicalization ever changes this should remain consistent
|
tests/core/test_fragment.py
|
canon
|
trumanw/ScaffoldGraph
|
python
|
def canon(smiles):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
|
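Round-tripping through MolFromSmiles and MolToSmiles normalizes atom order, aromaticity, and ring numbering, so two spellings of one molecule compare equal; this is why the tests canonicalize before asserting. For example (requires RDKit; canon is restated from the row above so the snippet runs standalone):
```python
from rdkit import Chem

def canon(smiles):
    return Chem.MolToSmiles(Chem.MolFromSmiles(smiles))

# Kekulé and aromatic spellings of benzene canonicalize identically:
assert canon('C1=CC=CC=C1') == canon('c1ccccc1')
```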
def __init__(self, **kwargs):
'Initialise the behaviour.'
services_interval = kwargs.pop('services_interval', DEFAULT_SERVICES_INTERVAL)
super().__init__(tick_interval=services_interval, **kwargs)
| -4,745,902,468,775,918,000
|
Initialise the behaviour.
|
packages/fetchai/skills/generic_seller/behaviours.py
|
__init__
|
ejfitzgerald/agents-aea
|
python
|
def __init__(self, **kwargs):
services_interval = kwargs.pop('services_interval', DEFAULT_SERVICES_INTERVAL)
super().__init__(tick_interval=services_interval, **kwargs)
|
def setup(self) -> None:
'\n Implement the setup.\n\n :return: None\n '
strategy = cast(GenericStrategy, self.context.strategy)
if strategy.is_ledger_tx:
ledger_api_dialogues = cast(LedgerApiDialogues, self.context.ledger_api_dialogues)
ledger_api_msg = LedgerApiMessage(performative=LedgerApiMessage.Performative.GET_BALANCE, dialogue_reference=ledger_api_dialogues.new_self_initiated_dialogue_reference(), ledger_id=strategy.ledger_id, address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)))
ledger_api_msg.counterparty = LEDGER_API_ADDRESS
ledger_api_dialogues.update(ledger_api_msg)
self.context.outbox.put_message(message=ledger_api_msg)
self._register_agent()
self._register_service()
| 3,916,772,024,572,210,000
|
Implement the setup.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
setup
|
ejfitzgerald/agents-aea
|
python
|
def setup(self) -> None:
'\n Implement the setup.\n\n :return: None\n '
strategy = cast(GenericStrategy, self.context.strategy)
if strategy.is_ledger_tx:
ledger_api_dialogues = cast(LedgerApiDialogues, self.context.ledger_api_dialogues)
ledger_api_msg = LedgerApiMessage(performative=LedgerApiMessage.Performative.GET_BALANCE, dialogue_reference=ledger_api_dialogues.new_self_initiated_dialogue_reference(), ledger_id=strategy.ledger_id, address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)))
ledger_api_msg.counterparty = LEDGER_API_ADDRESS
ledger_api_dialogues.update(ledger_api_msg)
self.context.outbox.put_message(message=ledger_api_msg)
self._register_agent()
self._register_service()
|
def act(self) -> None:
'\n Implement the act.\n\n :return: None\n '
| 2,904,657,344,585,305,000
|
Implement the act.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
act
|
ejfitzgerald/agents-aea
|
python
|
def act(self) -> None:
'\n Implement the act.\n\n :return: None\n '
|
def teardown(self) -> None:
'\n Implement the task teardown.\n\n :return: None\n '
self._unregister_service()
self._unregister_agent()
| -6,772,910,277,003,518,000
|
Implement the task teardown.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
teardown
|
ejfitzgerald/agents-aea
|
python
|
def teardown(self) -> None:
'\n Implement the task teardown.\n\n :return: None\n '
self._unregister_service()
self._unregister_agent()
|
def _register_agent(self) -> None:
"\n Register the agent's location.\n\n :return: None\n "
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.REGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('registering agent on SOEF.')
| -4,562,887,594,799,922,000
|
Register the agent's location.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
_register_agent
|
ejfitzgerald/agents-aea
|
python
|
def _register_agent(self) -> None:
"\n Register the agent's location.\n\n :return: None\n "
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.REGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('registering agent on SOEF.')
|
def _register_service(self) -> None:
"\n Register the agent's service.\n\n :return: None\n "
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_register_service_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.REGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('registering service on SOEF.')
| 7,120,971,471,438,055,000
|
Register the agent's service.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
_register_service
|
ejfitzgerald/agents-aea
|
python
|
def _register_service(self) -> None:
"\n Register the agent's service.\n\n :return: None\n "
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_register_service_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.REGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('registering service on SOEF.')
|
def _unregister_service(self) -> None:
'\n Unregister service from the SOEF.\n\n :return: None\n '
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_unregister_service_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.UNREGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('unregistering service from SOEF.')
| 6,959,750,327,624,035,000
|
Unregister service from the SOEF.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
_unregister_service
|
ejfitzgerald/agents-aea
|
python
|
def _unregister_service(self) -> None:
'\n Unregister service from the SOEF.\n\n :return: None\n '
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_unregister_service_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.UNREGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('unregistering service from SOEF.')
|
def _unregister_agent(self) -> None:
'\n Unregister agent from the SOEF.\n\n :return: None\n '
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.UNREGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('unregistering agent from SOEF.')
| 6,479,599,807,067,536,000
|
Unregister agent from the SOEF.
:return: None
|
packages/fetchai/skills/generic_seller/behaviours.py
|
_unregister_agent
|
ejfitzgerald/agents-aea
|
python
|
def _unregister_agent(self) -> None:
'\n Unregister agent from the SOEF.\n\n :return: None\n '
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
oef_search_msg = OefSearchMessage(performative=OefSearchMessage.Performative.UNREGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info('unregistering agent from SOEF.')
|
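The four (un)registration behaviours above differ only in the performative, the description getter, and the log line. A hedged refactor that factors the shared message plumbing into one helper (_send_oef_message is an invented name; the calls inside are taken verbatim from the bodies above and assume the same skill module and imports):
```python
def _send_oef_message(self, performative, description, log_msg: str) -> None:
    """Build, record, and send one OEF search message (sketch)."""
    oef_search_dialogues = cast(OefSearchDialogues, self.context.oef_search_dialogues)
    oef_search_msg = OefSearchMessage(
        performative=performative,
        dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
        service_description=description,
    )
    oef_search_msg.counterparty = self.context.search_service_address
    oef_search_dialogues.update(oef_search_msg)
    self.context.outbox.put_message(message=oef_search_msg)
    self.context.logger.info(log_msg)

def _register_agent(self) -> None:
    strategy = cast(GenericStrategy, self.context.strategy)
    self._send_oef_message(
        OefSearchMessage.Performative.REGISTER_SERVICE,
        strategy.get_location_description(),
        'registering agent on SOEF.',
    )
```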
def make_data(T=20):
'\n Sample data from a HMM model and compute associated CRF potentials.\n '
random_state = np.random.RandomState(0)
d = 0.2
e = 0.1
transition_matrix = np.array([[(1 - (2 * d)), d, d], [(1 - e), e, 0], [(1 - e), 0, e]])
means = np.array([[0, 0], [10, 0], [5, (- 5)]])
covs = np.array([[[1, 0], [0, 1]], [[0.2, 0], [0, 0.3]], [[2, 0], [0, 1]]])
start_state = 0
(emissions, states) = sample(transition_matrix, means, covs, start_state, n_samples=T, random_state=random_state)
emission_log_likelihood = []
for (mean, cov) in zip(means, covs):
rv = multivariate_normal(mean, cov)
emission_log_likelihood.append(rv.logpdf(emissions)[:, np.newaxis])
emission_log_likelihood = np.concatenate(emission_log_likelihood, axis=1)
log_transition_matrix = np.log(transition_matrix)
theta = (emission_log_likelihood[:, :, np.newaxis] + log_transition_matrix[np.newaxis, :, :])
return (states, emissions, theta)
| 2,518,043,038,236,105,000
|
Sample data from a HMM model and compute associated CRF potentials.
|
deepblast/utils.py
|
make_data
|
VGligorijevic/deepblast
|
python
|
def make_data(T=20):
'\n \n '
random_state = np.random.RandomState(0)
d = 0.2
e = 0.1
transition_matrix = np.array([[(1 - (2 * d)), d, d], [(1 - e), e, 0], [(1 - e), 0, e]])
means = np.array([[0, 0], [10, 0], [5, (- 5)]])
covs = np.array([[[1, 0], [0, 1]], [[0.2, 0], [0, 0.3]], [[2, 0], [0, 1]]])
start_state = 0
(emissions, states) = sample(transition_matrix, means, covs, start_state, n_samples=T, random_state=random_state)
emission_log_likelihood = []
for (mean, cov) in zip(means, covs):
rv = multivariate_normal(mean, cov)
emission_log_likelihood.append(rv.logpdf(emissions)[:, np.newaxis])
emission_log_likelihood = np.concatenate(emission_log_likelihood, axis=1)
log_transition_matrix = np.log(transition_matrix)
theta = (emission_log_likelihood[:, :, np.newaxis] + log_transition_matrix[np.newaxis, :, :])
return (states, emissions, theta)
|
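The final broadcast in make_data fuses the two log-probability tables into CRF potentials: theta[t, i, j] = emission_log_likelihood[t, i] + log_transition_matrix[i, j], one (n_states, n_states) potential matrix per time step. A shape check with toy values:
```python
import numpy as np

T, S = 4, 3
emission_log_likelihood = np.zeros((T, S))
log_transition_matrix = np.log(np.full((S, S), 1.0 / S))  # uniform transitions

theta = emission_log_likelihood[:, :, np.newaxis] + log_transition_matrix[np.newaxis, :, :]
assert theta.shape == (T, S, S)
assert np.isclose(theta[0, 1, 2], emission_log_likelihood[0, 1] + log_transition_matrix[1, 2])
```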
def get_data_path(fn, subfolder='data'):
"Return path to filename ``fn`` in the data folder.\n During testing it is often necessary to load data files. This\n function returns the full path to files in the ``data`` subfolder\n by default.\n Parameters\n ----------\n fn : str\n File name.\n subfolder : str, defaults to ``data``\n Name of the subfolder that contains the data.\n Returns\n -------\n str\n Inferred absolute path to the test data for the module where\n ``get_data_path(fn)`` is called.\n Notes\n -----\n The requested path may not point to an existing file, as its\n existence is not checked.\n This is from skbio's code base\n https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50\n "
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
| -3,221,232,984,871,560,700
|
Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
This is from skbio's code base
https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50
|
deepblast/utils.py
|
get_data_path
|
VGligorijevic/deepblast
|
python
|
def get_data_path(fn, subfolder='data'):
"Return path to filename ``fn`` in the data folder.\n During testing it is often necessary to load data files. This\n function returns the full path to files in the ``data`` subfolder\n by default.\n Parameters\n ----------\n fn : str\n File name.\n subfolder : str, defaults to ``data``\n Name of the subfolder that contains the data.\n Returns\n -------\n str\n Inferred absolute path to the test data for the module where\n ``get_data_path(fn)`` is called.\n Notes\n -----\n The requested path may not point to an existing file, as its\n existence is not checked.\n This is from skbio's code base\n https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50\n "
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
|
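Because get_data_path inspects the caller's frame, the returned path is anchored to the module that calls it rather than the working directory, so tests resolve their fixtures no matter where the test runner is launched from. Typical use inside a test file (the file names here are illustrative):
```python
# in some test module, e.g. tests/test_something.py
fasta = get_data_path('example.fa')                        # .../tests/data/example.fa
table = get_data_path('scores.csv', subfolder='fixtures')  # .../tests/fixtures/scores.csv
```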
def _get_best_axes(first_pos, axes):
"\n Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding\n\n :param first_pos:\n :type first_pos:\n :param axes:\n :type axes:\n :return:\n :rtype:\n "
if (axes.ndim > 2):
axes = axes[..., (0, 1), :]
ax_choice = (0, 1)
ax_names = ['A', 'B']
else:
fp_norm = np.linalg.norm(first_pos)
if (fp_norm > 1e-10):
first_pos = (first_pos / fp_norm)
a_proj = np.dot(first_pos, axes[0])
b_proj = np.dot(first_pos, axes[1])
c_proj = np.dot(first_pos, axes[2])
if (np.abs(b_proj) < 0.05):
if (np.abs(a_proj) > 0.95):
ax_choice = (1, 2)
ax_names = ['B', 'C']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
elif (np.abs(c_proj) < 0.05):
if (np.abs(a_proj) > 0.95):
ax_choice = (1, 2)
ax_names = ['B', 'C']
else:
ax_choice = (0, 2)
ax_names = ['A', 'C']
elif (np.abs(a_proj) < 0.05):
if (np.abs(b_proj) > 0.95):
ax_choice = (0, 2)
ax_names = ['A', 'C']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
axes = axes[(ax_choice,)]
return (axes, ax_names, ax_choice)
| 8,478,339,699,627,261,000
|
Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding
:param first_pos:
:type first_pos:
:param axes:
:type axes:
:return:
:rtype:
|
Psience/Molecools/CoordinateSystems.py
|
_get_best_axes
|
McCoyGroup/Coordinerds
|
python
|
def _get_best_axes(first_pos, axes):
"\n Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding\n\n :param first_pos:\n :type first_pos:\n :param axes:\n :type axes:\n :return:\n :rtype:\n "
if (axes.ndim > 2):
axes = axes[..., (0, 1), :]
ax_choice = (0, 1)
ax_names = ['A', 'B']
else:
fp_norm = np.linalg.norm(first_pos)
if (fp_norm > 1e-10):
first_pos = (first_pos / fp_norm)
a_proj = np.dot(first_pos, axes[0])
b_proj = np.dot(first_pos, axes[1])
c_proj = np.dot(first_pos, axes[2])
if (np.abs(b_proj) < 0.05):
if (np.abs(a_proj) > 0.95):
ax_choice = (1, 2)
ax_names = ['B', 'C']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
elif (np.abs(c_proj) < 0.05):
if (np.abs(a_proj) > 0.95):
ax_choice = (1, 2)
ax_names = ['B', 'C']
else:
ax_choice = (0, 2)
ax_names = ['A', 'C']
elif (np.abs(a_proj) < 0.05):
if (np.abs(b_proj) > 0.95):
ax_choice = (0, 2)
ax_names = ['A', 'C']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
else:
ax_choice = (0, 1)
ax_names = ['A', 'B']
axes = axes[(ax_choice,)]
return (axes, ax_names, ax_choice)
|
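The thresholds in _get_best_axes (0.95 on one projection, 0.05 on another) detect when the first atom lies essentially along one inertial axis; that axis is then discarded, since embedding angles measured around it are ill-conditioned. With identity axes this is easy to see (assuming _get_best_axes from the row above is importable):
```python
import numpy as np

axes = np.eye(3)                         # rows are the A, B, C inertial axes
first_pos = np.array([1.0, 0.0, 0.0])    # first atom sits on the A axis

best_axes, names, choice = _get_best_axes(first_pos, axes)
print(names, choice)  # ['B', 'C'] (1, 2): the A axis is dropped
```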
def __init__(self, molecule, converter_options=None, **opts):
'\n\n :param molecule:\n :type molecule: AbstractMolecule\n :param converter_options:\n :type converter_options:\n :param opts:\n :type opts:\n '
self.molecule = molecule
if (converter_options is None):
converter_options = opts
opts = {}
nats = len(molecule.atoms)
super().__init__(converter_options=converter_options, dimension=(nats, 3), coordinate_shape=(nats, 3), opts=opts)
self.set_embedding()
| 5,347,917,261,416,498,000
|
:param molecule:
:type molecule: AbstractMolecule
:param converter_options:
:type converter_options:
:param opts:
:type opts:
|
Psience/Molecools/CoordinateSystems.py
|
__init__
|
McCoyGroup/Coordinerds
|
python
|
def __init__(self, molecule, converter_options=None, **opts):
'\n\n :param molecule:\n :type molecule: AbstractMolecule\n :param converter_options:\n :type converter_options:\n :param opts:\n :type opts:\n '
self.molecule = molecule
if (converter_options is None):
converter_options = opts
opts = {}
nats = len(molecule.atoms)
super().__init__(converter_options=converter_options, dimension=(nats, 3), coordinate_shape=(nats, 3), opts=opts)
self.set_embedding()
|
def __init__(self, molecule, converter_options=None, **opts):
'\n\n :param molecule:\n :type molecule: AbstractMolecule\n :param converter_options:\n :type converter_options:\n :param opts:\n :type opts:\n '
self.molecule = molecule
nats = len(self.molecule.atoms)
if (converter_options is None):
converter_options = opts
opts = {}
super().__init__(converter_options=converter_options, dimension=(nats, 3), opts=opts)
| -4,174,696,519,157,836,000
|
:param molecule:
:type molecule: AbstractMolecule
:param converter_options:
:type converter_options:
:param opts:
:type opts:
|
Psience/Molecools/CoordinateSystems.py
|
__init__
|
McCoyGroup/Coordinerds
|
python
|
def __init__(self, molecule, converter_options=None, **opts):
'\n\n :param molecule:\n :type molecule: AbstractMolecule\n :param converter_options:\n :type converter_options:\n :param opts:\n :type opts:\n '
self.molecule = molecule
nats = len(self.molecule.atoms)
if (converter_options is None):
converter_options = opts
opts = {}
super().__init__(converter_options=converter_options, dimension=(nats, 3), opts=opts)
|
def set_embedding(self):
'\n Sets up the embedding options...\n :return:\n :rtype:\n '
molecule = self.molecule
com = molecule.center_of_mass
axes = molecule.inertial_axes
converter_options = self.converter_options
if ('ordering' in converter_options):
ordering = np.array(converter_options['ordering'], dtype=int)
ordering[(0, 1)] = (- 3)
ordering[(0, 2)] = (- 2)
ordering[(0, 3)] = (- 1)
ordering[(1, 2)] = (- 1)
ordering[(1, 3)] = (- 2)
ordering[(2, 3)] = (- 2)
converter_options['ordering'] = ordering
first = ordering[(0, 0)]
else:
first = 0
first_pos = molecule.coords[first]
(axes, ax_names, ax_choice) = _get_best_axes(first_pos, axes)
converter_options['origins'] = com
converter_options['axes'] = axes
converter_options['axes_labels'] = ax_names
converter_options['axes_choice'] = ax_choice
converter_options['molecule'] = molecule
| 2,874,453,837,014,049,000
|
Sets up the embedding options...
:return:
:rtype:
|
Psience/Molecools/CoordinateSystems.py
|
set_embedding
|
McCoyGroup/Coordinerds
|
python
|
def set_embedding(self):
'\n Sets up the embedding options...\n :return:\n :rtype:\n '
molecule = self.molecule
com = molecule.center_of_mass
axes = molecule.inertial_axes
converter_options = self.converter_options
if ('ordering' in converter_options):
ordering = np.array(converter_options['ordering'], dtype=int)
ordering[(0, 1)] = (- 3)
ordering[(0, 2)] = (- 2)
ordering[(0, 3)] = (- 1)
ordering[(1, 2)] = (- 1)
ordering[(1, 3)] = (- 2)
ordering[(2, 3)] = (- 2)
converter_options['ordering'] = ordering
first = ordering[(0, 0)]
else:
first = 0
first_pos = molecule.coords[first]
(axes, ax_names, ax_choice) = _get_best_axes(first_pos, axes)
converter_options['origins'] = com
converter_options['axes'] = axes
converter_options['axes_labels'] = ax_names
converter_options['axes_choice'] = ax_choice
converter_options['molecule'] = molecule
|
def convert(self, coords, molecule=None, origins=None, axes=None, ordering=None, **kwargs):
'\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n :param coords:\n :type coords: CoordinateSet\n :param molecule:\n :type molecule:\n :param origins:\n :type origins:\n :param axes:\n :type axes:\n :param ordering:\n :type ordering:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n '
(zmcs, opts) = self.convert_many(np.array([coords]), molecule=molecule, origins=origins, axes=axes, ordering=ordering, **kwargs)
zmcs = zmcs[0]
if ('derivs' in opts):
derivs = opts['derivs']
reshaped_derivs = ([None] * len(derivs))
for (i, v) in enumerate(derivs):
reshaped_derivs[i] = v[0]
opts['derivs'] = reshaped_derivs
return (zmcs, opts)
| -603,779,603,356,333,300
|
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords:
:type coords: CoordinateSet
:param molecule:
:type molecule:
:param origins:
:type origins:
:param axes:
:type axes:
:param ordering:
:type ordering:
:param kwargs:
:type kwargs:
:return:
:rtype:
|
Psience/Molecools/CoordinateSystems.py
|
convert
|
McCoyGroup/Coordinerds
|
python
|
def convert(self, coords, molecule=None, origins=None, axes=None, ordering=None, **kwargs):
'\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n :param coords:\n :type coords: CoordinateSet\n :param molecule:\n :type molecule:\n :param origins:\n :type origins:\n :param axes:\n :type axes:\n :param ordering:\n :type ordering:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n '
(zmcs, opts) = self.convert_many(np.array([coords]), molecule=molecule, origins=origins, axes=axes, ordering=ordering, **kwargs)
zmcs = zmcs[0]
if ('derivs' in opts):
derivs = opts['derivs']
reshaped_derivs = ([None] * len(derivs))
for (i, v) in enumerate(derivs):
reshaped_derivs[i] = v[0]
opts['derivs'] = reshaped_derivs
return (zmcs, opts)
|
def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None, strip_embedding=True, strip_dummies=False, **kwargs):
'\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n\n :param coords: coordinates in Cartesians to convert\n :type coords: np.ndarray\n :param molecule:\n :type molecule: AbstractMolecule\n :param origins: the origin for each individual structure\n :type origins: np.ndarray\n :param axes: the axes for each structure\n :type axes: np.ndarray\n :param ordering: the Z-matrix ordering spec\n :type ordering:\n :param strip_embedding: whether to strip the embedding coordinates\n :type strip_embedding:\n :param strip_dummies: whether to strip all dummy coordinates\n :type strip_dummies:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n '
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
if (origins.ndim == 1):
origins = np.broadcast_to(origins[(np.newaxis, np.newaxis)], (n_sys, 1, 3))
elif (origins.ndim == 2):
origins = origins[:, np.newaxis, :]
if (axes.ndim == 2):
axes = np.broadcast_to(axes[np.newaxis], (n_sys, 2, 3))
if (origins.shape[0] != n_sys):
if ((n_sys % origins.shape[0]) != 0):
raise ValueError('inconsistent shapes; origins shape {} but coords shape {}'.format(origins.shape, coords.shape))
num_coords = (n_sys // origins.shape[0])
origins = np.broadcast_to(origins[:, np.newaxis, :, :], ((origins.shape[0], num_coords) + origins.shape[1:]))
origins = origins.reshape(((n_sys,) + origins.shape[2:]))
if (axes.shape[0] != n_sys):
if ((n_sys % axes.shape[0]) != 0):
raise ValueError('inconsistent shapes; axes shape {} but coords shape {}'.format(axes.shape, coords.shape))
num_coords = (n_sys // axes.shape[0])
axes = np.broadcast_to(axes[:, np.newaxis, :, :], ((axes.shape[0], num_coords) + axes.shape[1:]))
axes = axes.reshape(((n_sys,) + axes.shape[2:]))
coords = np.concatenate([origins, (origins + axes), coords], axis=1)
if (ordering is not None):
ordering = np.array(ordering, dtype=int)
ordering[(0, 1)] = (- 3)
ordering[(0, 2)] = (- 2)
ordering[(0, 3)] = (- 1)
ordering[(1, 2)] = (- 2)
ordering[(1, 3)] = (- 1)
ordering[(2, 3)] = (- 1)
ordering = (ordering + 3)
ordering = np.concatenate([[[0, (- 1), (- 1), (- 1)], [1, 0, (- 1), (- 1)], [2, 0, 1, (- 1)]], ordering])
res = CoordinateSet(coords, CartesianCoordinates3D).convert(ZMatrixCoordinates, ordering=ordering, origins=origins, axes=axes, **kwargs)
if isinstance(res, tuple):
(zmcs, opts) = res
else:
zmcs = res
opts = res.converter_options
opts['ordering'] = (opts['ordering'][3:] - 3)
if strip_dummies:
dummies = ([0, 1, 2] + [(x + 3) for x in molecule.dummy_positions])
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if (dummies is not None):
main_excludes = np.setdiff1d(np.arange((len(molecule.atoms) + 3)), dummies)
sub_excludes = (main_excludes - 1)
if ('derivs' in opts):
derivs = opts['derivs']
reshaped_derivs = ([None] * len(derivs))
deriv_excludes = np.arange(3, (len(molecule.atoms) + 3))
for (i, v) in enumerate(derivs):
start_dim = (v.ndim - (2 * (i + 2)))
for j in range(start_dim, (v.ndim - 2), 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=(- 2))
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
zmcs = zmcs[..., sub_excludes, :]
return (zmcs, opts)
| 7,629,362,426,075,218,000
|
Converts from Cartesian to ZMatrix coords, preserving the embedding
:param coords: coordinates in Cartesians to convert
:type coords: np.ndarray
:param molecule:
:type molecule: AbstractMolecule
:param origins: the origin for each individual structure
:type origins: np.ndarray
:param axes: the axes for each structure
:type axes: np.ndarray
:param ordering: the Z-matrix ordering spec
:type ordering:
:param strip_embedding: whether to strip the embedding coordinates
:type strip_embedding:
:param strip_dummies: whether to strip all dummy coordinates
:type strip_dummies:
:param kwargs:
:type kwargs:
:return:
:rtype:
|
Psience/Molecools/CoordinateSystems.py
|
convert_many
|
McCoyGroup/Coordinerds
|
python
|
def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None, strip_embedding=True, strip_dummies=False, **kwargs):
'\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n\n :param coords: coordinates in Cartesians to convert\n :type coords: np.ndarray\n :param molecule:\n :type molecule: AbstractMolecule\n :param origins: the origin for each individual structure\n :type origins: np.ndarray\n :param axes: the axes for each structure\n :type axes: np.ndarray\n :param ordering: the Z-matrix ordering spec\n :type ordering:\n :param strip_embedding: whether to strip the embedding coordinates\n :type strip_embedding:\n :param strip_dummies: whether to strip all dummy coordinates\n :type strip_dummies:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n '
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
if (origins.ndim == 1):
origins = np.broadcast_to(origins[(np.newaxis, np.newaxis)], (n_sys, 1, 3))
elif (origins.ndim == 2):
origins = origins[:, np.newaxis, :]
if (axes.ndim == 2):
axes = np.broadcast_to(axes[np.newaxis], (n_sys, 2, 3))
if (origins.shape[0] != n_sys):
if ((n_sys % origins.shape[0]) != 0):
raise ValueError('inconsistent shapes; origins shape {} but coords shape {}'.format(origins.shape, coords.shape))
num_coords = (n_sys // origins.shape[0])
origins = np.broadcast_to(origins[:, np.newaxis, :, :], ((origins.shape[0], num_coords) + origins.shape[1:]))
origins = origins.reshape(((n_sys,) + origins.shape[2:]))
if (axes.shape[0] != n_sys):
if ((n_sys % axes.shape[0]) != 0):
raise ValueError('inconsistent shapes; axes shape {} but coords shape {}'.format(axes.shape, coords.shape))
num_coords = (n_sys // axes.shape[0])
axes = np.broadcast_to(axes[:, np.newaxis, :, :], ((axes.shape[0], num_coords) + axes.shape[1:]))
axes = axes.reshape(((n_sys,) + axes.shape[2:]))
coords = np.concatenate([origins, (origins + axes), coords], axis=1)
if (ordering is not None):
ordering = np.array(ordering, dtype=int)
ordering[(0, 1)] = (- 3)
ordering[(0, 2)] = (- 2)
ordering[(0, 3)] = (- 1)
ordering[(1, 2)] = (- 2)
ordering[(1, 3)] = (- 1)
ordering[(2, 3)] = (- 1)
ordering = (ordering + 3)
ordering = np.concatenate([[[0, (- 1), (- 1), (- 1)], [1, 0, (- 1), (- 1)], [2, 0, 1, (- 1)]], ordering])
res = CoordinateSet(coords, CartesianCoordinates3D).convert(ZMatrixCoordinates, ordering=ordering, origins=origins, axes=axes, **kwargs)
if isinstance(res, tuple):
(zmcs, opts) = res
else:
zmcs = res
opts = res.converter_options
opts['ordering'] = (opts['ordering'][3:] - 3)
if strip_dummies:
dummies = ([0, 1, 2] + [(x + 3) for x in molecule.dummy_positions])
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if (dummies is not None):
main_excludes = np.setdiff1d(np.arange((len(molecule.atoms) + 3)), dummies)
sub_excludes = (main_excludes - 1)
if ('derivs' in opts):
derivs = opts['derivs']
reshaped_derivs = ([None] * len(derivs))
deriv_excludes = np.arange(3, (len(molecule.atoms) + 3))
for (i, v) in enumerate(derivs):
start_dim = (v.ndim - (2 * (i + 2)))
for j in range(start_dim, (v.ndim - 2), 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=(- 2))
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
zmcs = zmcs[..., sub_excludes, :]
return (zmcs, opts)
|
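The origin/axes shape handling at the top of convert_many (promoting a single origin or axis pair to one per structure) stands alone from the coordinate machinery. A minimal numpy sketch of that broadcasting, assuming the same (n_sys, 1, 3) / (n_sys, 2, 3) target shapes; the helper name is mine:

import numpy as np

def broadcast_embedding(origins, axes, n_sys):
    # Promote a single origin (3,) or per-structure origins (n, 3)
    # to (n_sys, 1, 3), mirroring the convert_many logic above.
    origins = np.asarray(origins, dtype=float)
    if origins.ndim == 1:
        origins = np.broadcast_to(origins[np.newaxis, np.newaxis], (n_sys, 1, 3))
    elif origins.ndim == 2:
        origins = origins[:, np.newaxis, :]
    # Promote a single axis pair (2, 3) to (n_sys, 2, 3).
    axes = np.asarray(axes, dtype=float)
    if axes.ndim == 2:
        axes = np.broadcast_to(axes[np.newaxis], (n_sys, 2, 3))
    return origins, axes

origins, axes = broadcast_embedding([0.0, 0.0, 0.0],
                                    [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                                    n_sys=5)
print(origins.shape, axes.shape)  # (5, 1, 3) (5, 2, 3)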
def convert_many(self, coords, **kwargs):
'\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n '
return (coords, kwargs)
| -2,000,805,347,107,456,500
|
Converts from Cartesian to ZMatrix coords, preserving the embedding
|
Psience/Molecools/CoordinateSystems.py
|
convert_many
|
McCoyGroup/Coordinerds
|
python
|
def convert_many(self, coords, **kwargs):
'\n \n '
return (coords, kwargs)
|
def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None, reembed=False, axes_choice=None, return_derivs=None, strip_dummies=False, strip_embedding=True, planar_ref_tolerance=None, **kwargs):
'\n Converts from Cartesian to ZMatrix coords, attempting to preserve the embedding\n '
from .Molecule import Molecule
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
if (n_coords != (n_atoms + 2)):
if (n_coords != n_atoms):
raise ValueError('Embedding unclear when num_coords ({}) < num_atoms ({})'.format(n_coords, n_atoms))
x_ax = axes[..., 0, :]
y_ax = axes[..., 1, :]
extra_norms0 = nput.vec_norms(x_ax)
extra_norms1 = nput.vec_norms(y_ax)
(extra_angles, _) = nput.vec_angles(x_ax, y_ax)
extra_coords = np.zeros((n_sys, 2, 3))
extra_coords[(..., 0, 0)] = extra_norms0
extra_coords[(..., 1, 0)] = extra_norms1
extra_coords[(..., 1, 1)] = extra_angles
coords = np.concatenate([extra_coords, coords], axis=(- 2))
if (ordering is not None):
ordering = np.array(ordering, dtype=int)
ordering = (ordering + 3)
ordering = np.concatenate([[[0, (- 1), (- 1), (- 1)], [1, 0, (- 1), (- 1)], [2, 0, 1, (- 1)]], ordering])
refuse_derivs = (reembed and (coords.squeeze().ndim != 2))
res = CoordinateSet(coords, ZMatrixCoordinates).convert(CartesianCoordinates3D, ordering=ordering, origins=origins, axes=axes, return_derivs=(return_derivs and (not refuse_derivs)), **kwargs)
if isinstance(res, tuple):
(carts, opts) = res
else:
carts = res
opts = res.converter_options
if reembed:
if (molecule is None):
raise ValueError("can't reembed without a reference structure")
embed_carts = carts[..., 3:, :]
reembed = (not ((carts.squeeze().ndim == 2) and np.allclose(molecule.coords, embed_carts, atol=1e-05)))
if reembed:
if (not return_derivs):
embed_carts = molecule.embed_coords(embed_carts, planar_ref_tolerance=planar_ref_tolerance)
carts = np.concatenate([carts[..., :3, :], embed_carts], axis=(- 2))
else:
(inert_coords, coord_coms, coord_axes) = Molecule(molecule.atoms, embed_carts).principle_axis_data
if (axes_choice is None):
axes_choice = (0, 1)
guh = self.convert_many(coords, origins=coord_coms, axes=coord_axes[:, axes_choice], molecule=molecule, reembed=False, ordering=ordering, return_derivs=return_derivs, axes_choice=axes_choice, **kwargs)
return guh
opts['origins'] = origins
opts['axes'] = axes
if (ordering is not None):
opts['ordering'] = (ordering[3:] - 3)
if strip_dummies:
dummies = ([0, 1, 2] + [(x + 3) for x in molecule.dummy_positions])
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if (dummies is not None):
main_excludes = np.setdiff1d(np.arange((len(molecule.atoms) + 3)), dummies)
sub_excludes = (main_excludes - 1)
if ('derivs' in opts):
derivs = opts['derivs']
reshaped_derivs = ([None] * len(derivs))
deriv_excludes = np.arange(3, (len(molecule.atoms) + 3))
for (i, v) in enumerate(derivs):
start_dim = (v.ndim - i)
for j in range(start_dim, v.ndim, 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=(- 2))
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
carts = carts[..., main_excludes, :]
return (carts, opts)
| 6,613,672,720,733,352,000
|
Converts from Cartesian to ZMatrix coords, attempting to preserve the embedding
|
Psience/Molecools/CoordinateSystems.py
|
convert_many
|
McCoyGroup/Coordinerds
|
python
|
def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None, reembed=False, axes_choice=None, return_derivs=None, strip_dummies=False, strip_embedding=True, planar_ref_tolerance=None, **kwargs):
'\n \n '
from .Molecule import Molecule
n_sys = coords.shape[0]
n_coords = coords.shape[1]
n_atoms = len(molecule.atoms)
if (n_coords != (n_atoms + 2)):
if (n_coords != n_atoms):
raise ValueError('Embedding unclear when num_coords ({}) < num_atoms ({})'.format(n_coords, n_atoms))
x_ax = axes[..., 0, :]
y_ax = axes[..., 1, :]
extra_norms0 = nput.vec_norms(x_ax)
extra_norms1 = nput.vec_norms(y_ax)
(extra_angles, _) = nput.vec_angles(x_ax, y_ax)
extra_coords = np.zeros((n_sys, 2, 3))
extra_coords[(..., 0, 0)] = extra_norms0
extra_coords[(..., 1, 0)] = extra_norms1
extra_coords[(..., 1, 1)] = extra_angles
coords = np.concatenate([extra_coords, coords], axis=(- 2))
if (ordering is not None):
ordering = np.array(ordering, dtype=int)
ordering = (ordering + 3)
ordering = np.concatenate([[[0, (- 1), (- 1), (- 1)], [1, 0, (- 1), (- 1)], [2, 0, 1, (- 1)]], ordering])
refuse_derivs = (reembed and (coords.squeeze().ndim != 2))
res = CoordinateSet(coords, ZMatrixCoordinates).convert(CartesianCoordinates3D, ordering=ordering, origins=origins, axes=axes, return_derivs=(return_derivs and (not refuse_derivs)), **kwargs)
if isinstance(res, tuple):
(carts, opts) = res
else:
carts = res
opts = res.converter_options
if reembed:
if (molecule is None):
raise ValueError("can't reembed without a reference structure")
embed_carts = carts[..., 3:, :]
reembed = (not ((carts.squeeze().ndim == 2) and np.allclose(molecule.coords, embed_carts, atol=1e-05)))
if reembed:
if (not return_derivs):
embed_carts = molecule.embed_coords(embed_carts, planar_ref_tolerance=planar_ref_tolerance)
carts = np.concatenate([carts[..., :3, :], embed_carts], axis=(- 2))
else:
(inert_coords, coord_coms, coord_axes) = Molecule(molecule.atoms, embed_carts).principle_axis_data
if (axes_choice is None):
axes_choice = (0, 1)
guh = self.convert_many(coords, origins=coord_coms, axes=coord_axes[:, axes_choice], molecule=molecule, reembed=False, ordering=ordering, return_derivs=return_derivs, axes_choice=axes_choice, **kwargs)
return guh
opts['origins'] = origins
opts['axes'] = axes
if (ordering is not None):
opts['ordering'] = (ordering[3:] - 3)
if strip_dummies:
dummies = ([0, 1, 2] + [(x + 3) for x in molecule.dummy_positions])
elif strip_embedding:
dummies = [0, 1, 2]
else:
dummies = None
if (dummies is not None):
main_excludes = np.setdiff1d(np.arange((len(molecule.atoms) + 3)), dummies)
sub_excludes = (main_excludes - 1)
if ('derivs' in opts):
derivs = opts['derivs']
reshaped_derivs = ([None] * len(derivs))
deriv_excludes = np.arange(3, (len(molecule.atoms) + 3))
for (i, v) in enumerate(derivs):
start_dim = (v.ndim - i)
for j in range(start_dim, v.ndim, 2):
v = np.take(v, deriv_excludes, axis=j)
v = np.take(v, sub_excludes, axis=(- 2))
reshaped_derivs[i] = v
opts['derivs'] = reshaped_derivs
carts = carts[..., main_excludes, :]
return (carts, opts)
|
def format_cfg(cfg):
'Format experiment config for friendly display'
def list2str(cfg):
for (key, value) in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if ((len(value) == 0) or isinstance(value[0], (int, float))):
cfg[key] = str(value)
else:
for (i, item) in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split('\n')
json_str = [re.sub('(\\"|(!\\],$)|\\s$)', '', line) for line in json_str]
cfg_str = '\n'.join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
| 9,010,408,615,262,421,000
|
Format experiment config for friendly display
|
up/utils/general/cfg_helper.py
|
format_cfg
|
ModelTC/EOD
|
python
|
def format_cfg(cfg):
def list2str(cfg):
for (key, value) in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if ((len(value) == 0) or isinstance(value[0], (int, float))):
cfg[key] = str(value)
else:
for (i, item) in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split('\n')
    json_str = [re.sub('(\\"|(!\\],$)|\\s$)', '', line) for line in json_str]
cfg_str = '\n'.join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
|
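A short usage sketch for format_cfg; it assumes the module's copy, json, and re imports, and the config values here are made up:

cfg = {
    'runtime': {'device': 'cuda', 'seed': 42},
    'anchor_sizes': [8, 16, 32],                  # numeric list -> one compact string
    'datasets': [{'name': 'train', 'batch': 2}],  # list of dicts -> recursed into
}
print(format_cfg(cfg))

Numeric lists are collapsed to a single string by list2str before the JSON dump, and the regex pass then strips the surrounding quotes, so [8, 16, 32] prints on one line instead of four.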
def try_decode(val):
'bool, int, float, or str'
if (val.upper() == 'FALSE'):
return False
elif (val.upper() == 'TRUE'):
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
| 9,010,911,974,271,447,000
|
bool, int, float, or str
|
up/utils/general/cfg_helper.py
|
try_decode
|
ModelTC/EOD
|
python
|
def try_decode(val):
if (val.upper() == 'FALSE'):
return False
elif (val.upper() == 'TRUE'):
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
|
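try_decode's coercion order (bool, then int, then float, then raw string) in action; is_number is the module's own float-parseability helper, assumed available:

for raw in ('TRUE', 'false', '42', '3.14', 'resnet50'):
    print(raw, '->', repr(try_decode(raw)))
# TRUE     -> True
# false    -> False
# 42       -> 42
# 3.14     -> 3.14
# resnet50 -> 'resnet50'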
@sdc_min_version('3.15.0')
def test_runner_metrics_for_init_and_destroy(sdc_builder, sdc_executor):
'Ensure that we properly update metrics when the runner is in starting phase.'
builder = sdc_builder.get_pipeline_builder()
SLEEP_SCRIPT = 'sleep(5*1000)'
source = builder.add_stage('Dev Data Generator')
groovy = builder.add_stage('Groovy Evaluator', type='processor')
groovy.init_script = SLEEP_SCRIPT
groovy.destroy_script = SLEEP_SCRIPT
groovy.script = SLEEP_SCRIPT
trash = builder.add_stage('Trash')
((source >> groovy) >> trash)
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline, wait=False)
count = 0
while True:
metrics_json = sdc_executor.api_client.get_pipeline_metrics(pipeline.id)
if metrics_json:
metrics = Metrics(metrics_json)
logger.info(f"Detected runtime gauge state {metrics.gauge('runner.0.gauge').value['state']}")
if (metrics.gauge('runner.0.gauge').value['state'] == 'Starting'):
count += 1
status = sdc_executor.get_pipeline_status(pipeline).response.json()
sleep(0.5)
if (status.get('status') == 'RUNNING'):
break
assert (count > 0)
sdc_executor.stop_pipeline(pipeline)
| 5,452,576,526,804,625,000
|
Ensure that we properly update metrics when the runner is in starting phase.
|
pipeline/test_metrics.py
|
test_runner_metrics_for_init_and_destroy
|
anubandhan/datacollector-tests
|
python
|
@sdc_min_version('3.15.0')
def test_runner_metrics_for_init_and_destroy(sdc_builder, sdc_executor):
builder = sdc_builder.get_pipeline_builder()
SLEEP_SCRIPT = 'sleep(5*1000)'
source = builder.add_stage('Dev Data Generator')
groovy = builder.add_stage('Groovy Evaluator', type='processor')
groovy.init_script = SLEEP_SCRIPT
groovy.destroy_script = SLEEP_SCRIPT
groovy.script = SLEEP_SCRIPT
trash = builder.add_stage('Trash')
((source >> groovy) >> trash)
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline, wait=False)
count = 0
while True:
metrics_json = sdc_executor.api_client.get_pipeline_metrics(pipeline.id)
if metrics_json:
metrics = Metrics(metrics_json)
logger.info(f"Detected runtime gauge state {metrics.gauge('runner.0.gauge').value['state']}")
if (metrics.gauge('runner.0.gauge').value['state'] == 'Starting'):
count += 1
status = sdc_executor.get_pipeline_status(pipeline).response.json()
sleep(0.5)
if (status.get('status') == 'RUNNING'):
break
assert (count > 0)
sdc_executor.stop_pipeline(pipeline)
|
def get_task_id(self):
'Property to get the task id of this component'
return self.task_id
| -5,503,473,864,786,678,000
|
Property to get the task id of this component
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_task_id
|
kalimfaria/heron
|
python
|
def get_task_id(self):
return self.task_id
|
def get_component_id(self):
'Property to get the component id of this component'
return self.task_to_component_map.get(self.get_task_id())
| 7,983,561,852,347,411,000
|
Property to get the component id of this component
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_component_id
|
kalimfaria/heron
|
python
|
def get_component_id(self):
return self.task_to_component_map.get(self.get_task_id())
|
def get_cluster_config(self):
'Returns the cluster config for this component\n\n Note that the returned config is auto-typed map: <str -> any Python object>.\n '
return self.config
| 2,560,026,072,691,256,000
|
Returns the cluster config for this component
Note that the returned config is auto-typed map: <str -> any Python object>.
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_cluster_config
|
kalimfaria/heron
|
python
|
def get_cluster_config(self):
'Returns the cluster config for this component\n\n Note that the returned config is auto-typed map: <str -> any Python object>.\n '
return self.config
|
def get_topology_name(self):
'Returns the name of the topology\n '
return str(self.topology.name)
| -6,082,850,419,871,236,000
|
Returns the name of the topology
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_topology_name
|
kalimfaria/heron
|
python
|
def get_topology_name(self):
'\n '
return str(self.topology.name)
|
def register_metric(self, name, metric, time_bucket_in_sec):
'Registers a new metric to this context'
collector = self.get_metrics_collector()
collector.register_metric(name, metric, time_bucket_in_sec)
| -3,845,584,536,058,857,500
|
Registers a new metric to this context
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
register_metric
|
kalimfaria/heron
|
python
|
def register_metric(self, name, metric, time_bucket_in_sec):
collector = self.get_metrics_collector()
collector.register_metric(name, metric, time_bucket_in_sec)
|
def get_sources(self, component_id):
'Returns the declared inputs to specified component\n\n :return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or\n None if not found\n '
StreamId = namedtuple('StreamId', 'id, component_name')
if (component_id in self.inputs):
ret = {}
for istream in self.inputs.get(component_id):
key = StreamId(id=istream.stream.id, component_name=istream.stream.component_name)
ret[key] = istream.gtype
return ret
else:
return None
| 9,033,404,051,955,407,000
|
Returns the declared inputs to specified component
:return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or
None if not found
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_sources
|
kalimfaria/heron
|
python
|
def get_sources(self, component_id):
'Returns the declared inputs to specified component\n\n :return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or\n None if not found\n '
StreamId = namedtuple('StreamId', 'id, component_name')
if (component_id in self.inputs):
ret = {}
for istream in self.inputs.get(component_id):
key = StreamId(id=istream.stream.id, component_name=istream.stream.component_name)
ret[key] = istream.gtype
return ret
else:
return None
|
def get_component_tasks(self, component_id):
'Returns the task ids allocated for the given component id'
ret = []
for (task_id, comp_id) in self.task_to_component_map.items():
if (comp_id == component_id):
ret.append(task_id)
return ret
| 6,872,420,084,559,937,000
|
Returns the task ids allocated for the given component id
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_component_tasks
|
kalimfaria/heron
|
python
|
def get_component_tasks(self, component_id):
ret = []
for (task_id, comp_id) in self.task_to_component_map.items():
if (comp_id == component_id):
ret.append(task_id)
return ret
|
def add_task_hook(self, task_hook):
'Registers a specified task hook to this context\n\n :type task_hook: heron.instance.src.python.utils.topology.ITaskHook\n :param task_hook: Implementation of ITaskHook\n '
if (not isinstance(task_hook, ITaskHook)):
raise TypeError(('In add_task_hook(): attempt to add non ITaskHook instance, given: %s' % str(type(task_hook))))
self.task_hooks.append(task_hook)
| -3,723,520,555,085,917,000
|
Registers a specified task hook to this context
:type task_hook: heron.instance.src.python.utils.topology.ITaskHook
:param task_hook: Implementation of ITaskHook
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
add_task_hook
|
kalimfaria/heron
|
python
|
def add_task_hook(self, task_hook):
'Registers a specified task hook to this context\n\n :type task_hook: heron.instance.src.python.utils.topology.ITaskHook\n :param task_hook: Implementation of ITaskHook\n '
if (not isinstance(task_hook, ITaskHook)):
raise TypeError(('In add_task_hook(): attempt to add non ITaskHook instance, given: %s' % str(type(task_hook))))
self.task_hooks.append(task_hook)
|
def get_topology_pex_path(self):
"Returns the topology's pex file path"
return self.topology_pex_path
| 8,942,431,209,857,057,000
|
Returns the topology's pex file path
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_topology_pex_path
|
kalimfaria/heron
|
python
|
def get_topology_pex_path(self):
return self.topology_pex_path
|
def get_metrics_collector(self):
"Returns this context's metrics collector"
if ((self.metrics_collector is None) or (not isinstance(self.metrics_collector, MetricsCollector))):
raise RuntimeError('Metrics collector is not registered in this context')
return self.metrics_collector
| -5,930,916,964,854,351,000
|
Returns this context's metrics collector
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
get_metrics_collector
|
kalimfaria/heron
|
python
|
def get_metrics_collector(self):
if ((self.metrics_collector is None) or (not isinstance(self.metrics_collector, MetricsCollector))):
raise RuntimeError('Metrics collector is not registered in this context')
return self.metrics_collector
|
def invoke_hook_prepare(self):
"invoke task hooks for after the spout/bolt's initialize() method"
for task_hook in self.task_hooks:
task_hook.prepare(self.get_cluster_config(), self)
| -8,962,826,239,330,806,000
|
invoke task hooks for after the spout/bolt's initialize() method
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_prepare
|
kalimfaria/heron
|
python
|
def invoke_hook_prepare(self):
for task_hook in self.task_hooks:
task_hook.prepare(self.get_cluster_config(), self)
|
def invoke_hook_cleanup(self):
"invoke task hooks for just before the spout/bolt's cleanup method"
for task_hook in self.task_hooks:
task_hook.clean_up()
| -149,705,493,558,228,670
|
invoke task hooks for just before the spout/bolt's cleanup method
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_cleanup
|
kalimfaria/heron
|
python
|
def invoke_hook_cleanup(self):
for task_hook in self.task_hooks:
task_hook.clean_up()
|
def invoke_hook_emit(self, values, stream_id, out_tasks):
'invoke task hooks for every time a tuple is emitted in spout/bolt\n\n :type values: list\n :param values: values emitted\n :type stream_id: str\n :param stream_id: stream id into which tuple is emitted\n :type out_tasks: list\n :param out_tasks: list of custom grouping target task id\n '
if (len(self.task_hooks) > 0):
emit_info = EmitInfo(values=values, stream_id=stream_id, task_id=self.get_task_id(), out_tasks=out_tasks)
for task_hook in self.task_hooks:
task_hook.emit(emit_info)
| -8,364,756,215,371,977,000
|
invoke task hooks for every time a tuple is emitted in spout/bolt
:type values: list
:param values: values emitted
:type stream_id: str
:param stream_id: stream id into which tuple is emitted
:type out_tasks: list
:param out_tasks: list of custom grouping target task id
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_emit
|
kalimfaria/heron
|
python
|
def invoke_hook_emit(self, values, stream_id, out_tasks):
'invoke task hooks for every time a tuple is emitted in spout/bolt\n\n :type values: list\n :param values: values emitted\n :type stream_id: str\n :param stream_id: stream id into which tuple is emitted\n :type out_tasks: list\n :param out_tasks: list of custom grouping target task id\n '
if (len(self.task_hooks) > 0):
emit_info = EmitInfo(values=values, stream_id=stream_id, task_id=self.get_task_id(), out_tasks=out_tasks)
for task_hook in self.task_hooks:
task_hook.emit(emit_info)
|
def invoke_hook_spout_ack(self, message_id, complete_latency_ns):
'invoke task hooks for every time spout acks a tuple\n\n :type message_id: str\n :param message_id: message id to which an acked tuple was anchored\n :type complete_latency_ns: float\n :param complete_latency_ns: complete latency in nano seconds\n '
if (len(self.task_hooks) > 0):
spout_ack_info = SpoutAckInfo(message_id=message_id, spout_task_id=self.get_task_id(), complete_latency_ms=(complete_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.spout_ack(spout_ack_info)
| 9,094,690,015,681,524,000
|
invoke task hooks for every time spout acks a tuple
:type message_id: str
:param message_id: message id to which an acked tuple was anchored
:type complete_latency_ns: float
:param complete_latency_ns: complete latency in nano seconds
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_spout_ack
|
kalimfaria/heron
|
python
|
def invoke_hook_spout_ack(self, message_id, complete_latency_ns):
'invoke task hooks for every time spout acks a tuple\n\n :type message_id: str\n :param message_id: message id to which an acked tuple was anchored\n :type complete_latency_ns: float\n :param complete_latency_ns: complete latency in nano seconds\n '
if (len(self.task_hooks) > 0):
spout_ack_info = SpoutAckInfo(message_id=message_id, spout_task_id=self.get_task_id(), complete_latency_ms=(complete_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.spout_ack(spout_ack_info)
|
def invoke_hook_spout_fail(self, message_id, fail_latency_ns):
'invoke task hooks for every time spout fails a tuple\n\n :type message_id: str\n :param message_id: message id to which a failed tuple was anchored\n :type fail_latency_ns: float\n :param fail_latency_ns: fail latency in nano seconds\n '
if (len(self.task_hooks) > 0):
spout_fail_info = SpoutFailInfo(message_id=message_id, spout_task_id=self.get_task_id(), fail_latency_ms=(fail_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.spout_fail(spout_fail_info)
| 2,162,557,886,614,590,500
|
invoke task hooks for every time spout fails a tuple
:type message_id: str
:param message_id: message id to which a failed tuple was anchored
:type fail_latency_ns: float
:param fail_latency_ns: fail latency in nano seconds
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_spout_fail
|
kalimfaria/heron
|
python
|
def invoke_hook_spout_fail(self, message_id, fail_latency_ns):
'invoke task hooks for every time spout fails a tuple\n\n :type message_id: str\n :param message_id: message id to which a failed tuple was anchored\n :type fail_latency_ns: float\n :param fail_latency_ns: fail latency in nano seconds\n '
if (len(self.task_hooks) > 0):
spout_fail_info = SpoutFailInfo(message_id=message_id, spout_task_id=self.get_task_id(), fail_latency_ms=(fail_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.spout_fail(spout_fail_info)
|
def invoke_hook_bolt_execute(self, heron_tuple, execute_latency_ns):
'invoke task hooks for every time bolt processes a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is executed\n :type execute_latency_ns: float\n :param execute_latency_ns: execute latency in nano seconds\n '
if (len(self.task_hooks) > 0):
bolt_execute_info = BoltExecuteInfo(heron_tuple=heron_tuple, executing_task_id=self.get_task_id(), execute_latency_ms=(execute_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.bolt_execute(bolt_execute_info)
| 5,612,779,335,163,985,000
|
invoke task hooks for every time bolt processes a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is executed
:type execute_latency_ns: float
:param execute_latency_ns: execute latency in nano seconds
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_bolt_execute
|
kalimfaria/heron
|
python
|
def invoke_hook_bolt_execute(self, heron_tuple, execute_latency_ns):
'invoke task hooks for every time bolt processes a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is executed\n :type execute_latency_ns: float\n :param execute_latency_ns: execute latency in nano seconds\n '
if (len(self.task_hooks) > 0):
bolt_execute_info = BoltExecuteInfo(heron_tuple=heron_tuple, executing_task_id=self.get_task_id(), execute_latency_ms=(execute_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.bolt_execute(bolt_execute_info)
|
def invoke_hook_bolt_ack(self, heron_tuple, process_latency_ns):
'invoke task hooks for every time bolt acks a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is acked\n :type process_latency_ns: float\n :param process_latency_ns: process latency in nano seconds\n '
if (len(self.task_hooks) > 0):
bolt_ack_info = BoltAckInfo(heron_tuple=heron_tuple, acking_task_id=self.get_task_id(), process_latency_ms=(process_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.bolt_ack(bolt_ack_info)
| -8,833,921,388,376,193,000
|
invoke task hooks for every time bolt acks a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is acked
:type process_latency_ns: float
:param process_latency_ns: process latency in nano seconds
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_bolt_ack
|
kalimfaria/heron
|
python
|
def invoke_hook_bolt_ack(self, heron_tuple, process_latency_ns):
'invoke task hooks for every time bolt acks a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is acked\n :type process_latency_ns: float\n :param process_latency_ns: process latency in nano seconds\n '
if (len(self.task_hooks) > 0):
bolt_ack_info = BoltAckInfo(heron_tuple=heron_tuple, acking_task_id=self.get_task_id(), process_latency_ms=(process_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.bolt_ack(bolt_ack_info)
|
def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):
'invoke task hooks for every time bolt fails a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is failed\n :type fail_latency_ns: float\n :param fail_latency_ns: fail latency in nano seconds\n '
if (len(self.task_hooks) > 0):
bolt_fail_info = BoltFailInfo(heron_tuple=heron_tuple, failing_task_id=self.get_task_id(), fail_latency_ms=(fail_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.bolt_fail(bolt_fail_info)
| -1,143,828,988,706,259,700
|
invoke task hooks for every time bolt fails a tuple
:type heron_tuple: HeronTuple
:param heron_tuple: tuple that is failed
:type fail_latency_ns: float
:param fail_latency_ns: fail latency in nano seconds
|
heron/instance/src/python/utils/topology/topology_context_impl.py
|
invoke_hook_bolt_fail
|
kalimfaria/heron
|
python
|
def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):
'invoke task hooks for every time bolt fails a tuple\n\n :type heron_tuple: HeronTuple\n :param heron_tuple: tuple that is failed\n :type fail_latency_ns: float\n :param fail_latency_ns: fail latency in nano seconds\n '
if (len(self.task_hooks) > 0):
bolt_fail_info = BoltFailInfo(heron_tuple=heron_tuple, failing_task_id=self.get_task_id(), fail_latency_ms=(fail_latency_ns * system_constants.NS_TO_MS))
for task_hook in self.task_hooks:
task_hook.bolt_fail(bolt_fail_info)
|
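Taken together, the invoke_hook_* methods above define the callback surface a hook must provide: prepare, clean_up, emit, spout_ack, spout_fail, bolt_execute, bolt_ack, and bolt_fail. A minimal sketch of a latency-counting hook against that surface; the import path follows the add_task_hook docstring and everything else here is illustrative:

from heron.instance.src.python.utils.topology import ITaskHook  # path per docstring above

class LatencyCountingHook(ITaskHook):
    'Sketch: count executed tuples and accumulate bolt execute latency.'

    def prepare(self, conf, context):
        self.executed = 0
        self.total_latency_ms = 0.0

    def clean_up(self):
        print('executed %d tuples in %.1f ms' % (self.executed, self.total_latency_ms))

    def emit(self, emit_info):
        pass

    def spout_ack(self, spout_ack_info):
        pass

    def spout_fail(self, spout_fail_info):
        pass

    def bolt_execute(self, bolt_execute_info):
        # BoltExecuteInfo carries execute_latency_ms, as constructed above.
        self.executed += 1
        self.total_latency_ms += bolt_execute_info.execute_latency_ms

    def bolt_ack(self, bolt_ack_info):
        pass

    def bolt_fail(self, bolt_fail_info):
        pass

# Registered on the context: context.add_task_hook(LatencyCountingHook())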
def powerset(lst):
'returns the power set of the list - the set of all subsets of the list'
if (lst == []):
return [[]]
lose_it = powerset(lst[1:])
use_it = map((lambda subset: ([lst[0]] + subset)), lose_it)
return (lose_it + use_it)
| 5,827,662,631,286,967,000
|
returns the power set of the list - the set of all subsets of the list
|
use_it_or_lose_it.py
|
powerset
|
jschmidtnj/CS115
|
python
|
def powerset(lst):
if (lst == []):
return [[]]
lose_it = powerset(lst[1:])
use_it = map((lambda subset: ([lst[0]] + subset)), lose_it)
return (lose_it + use_it)
|
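As written, powerset is Python 2 code: under Python 3, map returns an iterator, so lose_it + use_it raises a TypeError. A minimal Python 3 port of the same use-it-or-lose-it recursion:

def powerset3(lst):
    # Base case: the only subset of [] is [] itself.
    if lst == []:
        return [[]]
    lose_it = powerset3(lst[1:])                         # subsets without lst[0]
    use_it = [[lst[0]] + subset for subset in lose_it]   # subsets with lst[0]
    return lose_it + use_it

print(powerset3([1, 2]))  # [[], [2], [1], [1, 2]]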
def subset(target, lst):
    'determines whether or not it is possible to create the target sum using the\n values in the list. Values in the list can be positive, negative, or zero.'
if (target == 0):
return True
if (lst == []):
return False
    'and and or are short-circuit operators in python. The second operand is not evaluated\n when the overall result can be deduced by evaluating the first operand'
return (subset((target - lst[0]), lst[1:]) or subset(target, lst[1:]))
| -8,148,369,691,226,853,000
|
determines whether or not it is possible to create the target sum using the
values in the list. Values in the list can be positive, negative, or zero.
|
use_it_or_lose_it.py
|
subset
|
jschmidtnj/CS115
|
python
|
def subset(target, lst):
    'determines whether or not it is possible to create the target sum using the\n values in the list. Values in the list can be positive, negative, or zero.'
if (target == 0):
return True
if (lst == []):
return False
    'and and or are short-circuit operators in python. The second operand is not evaluated\n when the overall result can be deduced by evaluating the first operand'
return (subset((target - lst[0]), lst[1:]) or subset(target, lst[1:]))
|
def subset_with_values(target, lst):
'Determines whether or not it is possible to create the target sum using\n values in the list. Values in the list can be positive, negative, or zero.\n The function returns a tuple of exactly two items. The first is a boolean,\n that indicates true if the sum is possible and false if it is not. The second\n element in the tuple is a list of all values that add up to make the target sum.'
if (target == 0):
return (True, [])
if (lst == []):
return (False, [])
use_it = subset_with_values((target - lst[0]), lst[1:])
if use_it[0]:
return (True, ([lst[0]] + use_it[1]))
return subset_with_values(target, lst[1:])
| 8,734,992,016,607,454,000
|
Determines whether or not it is possible to create the target sum using
values in the list. Values in the list can be positive, negative, or zero.
The function returns a tuple of exactly two items. The first is a boolean,
that indicates true if the sum is possible and false if it is not. The second
element in the tuple is a list of all values that add up to make the target sum.
|
use_it_or_lose_it.py
|
subset_with_values
|
jschmidtnj/CS115
|
python
|
def subset_with_values(target, lst):
'Determines whether or not it is possible to create the target sum using\n values in the list. Values in the list can be positive, negative, or zero.\n The function returns a tuple of exactly two items. The first is a boolean,\n that indicates true if the sum is possible and false if it is not. The second\n element in the tuple is a list of all values that add up to make the target sum.'
if (target == 0):
return (True, [])
if (lst == []):
return (False, [])
use_it = subset_with_values((target - lst[0]), lst[1:])
if use_it[0]:
return (True, ([lst[0]] + use_it[1]))
return subset_with_values(target, lst[1:])
|
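Quick checks for the two subset-sum variants above (pure Python, no imports needed):

print(subset(7, [3, -1, 5, 2]))              # True   (3 - 1 + 5 = 7)
print(subset_with_values(7, [3, -1, 5, 2]))  # (True, [3, -1, 5])
print(subset_with_values(11, [3, 5]))        # (False, [])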
def LCSWithValues(S1, S2):
    'returns the longest common subsequence'
if ((S1 == '') or (S2 == '')):
return (0, '')
if (S1[0] == S2[0]):
result = LCSWithValues(S1[1:], S2[1:])
return ((1 + result[0]), (S1[0] + result[1]))
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if (useS1[0] > useS2[0]):
return useS1
return useS2
| -1,862,823,565,770,770,700
|
returns the longest common subsequence
|
use_it_or_lose_it.py
|
LCSWithValues
|
jschmidtnj/CS115
|
python
|
def LCSWithValues(S1, S2):
    if ((S1 == '') or (S2 == '')):
        return (0, '')
if (S1[0] == S2[0]):
result = LCSWithValues(S1[1:], S2[1:])
return ((1 + result[0]), (S1[0] + result[1]))
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if (useS1[0] > useS2[0]):
return useS1
return useS2
|
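A quick check of LCSWithValues; matched characters need not be adjacent, since it finds a subsequence rather than a substring:

print(LCSWithValues('ABC', 'AXBYC'))  # (3, 'ABC')
print(LCSWithValues('abc', 'xyz'))    # (0, '')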
def _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):
'Shuffles tensors in `input_tensors`, maintaining grouping.'
shuffle_queue = tf.RandomShuffleQueue(capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])
enqueue_op = shuffle_queue.enqueue(input_tensors)
runner = tf.train.QueueRunner(shuffle_queue, ([enqueue_op] * num_threads))
tf.train.add_queue_runner(runner)
output_tensors = shuffle_queue.dequeue()
for i in range(len(input_tensors)):
output_tensors[i].set_shape(input_tensors[i].shape)
return output_tensors
| 2,964,247,498,888,477,700
|
Shuffles tensors in `input_tensors`, maintaining grouping.
|
magenta/common/sequence_example_lib.py
|
_shuffle_inputs
|
KenniVelez/magenta
|
python
|
def _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):
shuffle_queue = tf.RandomShuffleQueue(capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])
enqueue_op = shuffle_queue.enqueue(input_tensors)
runner = tf.train.QueueRunner(shuffle_queue, ([enqueue_op] * num_threads))
tf.train.add_queue_runner(runner)
output_tensors = shuffle_queue.dequeue()
for i in range(len(input_tensors)):
output_tensors[i].set_shape(input_tensors[i].shape)
return output_tensors
|
def get_padded_batch(file_list, batch_size, input_size, label_shape=None, num_enqueuing_threads=4, shuffle=False):
'Reads batches of SequenceExamples from TFRecords and pads them.\n\n Can deal with variable length SequenceExamples by padding each batch to the\n length of the longest sequence with zeros.\n\n Args:\n file_list: A list of paths to TFRecord files containing SequenceExamples.\n batch_size: The number of SequenceExamples to include in each batch.\n input_size: The size of each input vector. The returned batch of inputs\n will have a shape [batch_size, num_steps, input_size].\n label_shape: Shape for labels. If not specified, will use [].\n num_enqueuing_threads: The number of threads to use for enqueuing\n SequenceExamples.\n shuffle: Whether to shuffle the batches.\n\n Returns:\n inputs: A tensor of shape [batch_size, num_steps, input_size] of floats32s.\n labels: A tensor of shape [batch_size, num_steps] of int64s.\n lengths: A tensor of shape [batch_size] of int32s. The lengths of each\n SequenceExample before padding.\n Raises:\n ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.\n '
file_queue = tf.train.string_input_producer(file_list)
reader = tf.TFRecordReader()
(_, serialized_example) = reader.read(file_queue)
sequence_features = {'inputs': tf.FixedLenSequenceFeature(shape=[input_size], dtype=tf.float32), 'labels': tf.FixedLenSequenceFeature(shape=(label_shape or []), dtype=tf.int64)}
(_, sequence) = tf.parse_single_sequence_example(serialized_example, sequence_features=sequence_features)
length = tf.shape(sequence['inputs'])[0]
input_tensors = [sequence['inputs'], sequence['labels'], length]
if shuffle:
if (num_enqueuing_threads < 2):
raise ValueError('`num_enqueuing_threads` must be at least 2 when shuffling.')
shuffle_threads = int((math.ceil(num_enqueuing_threads) / 2.0))
min_after_dequeue = count_records(file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)
input_tensors = _shuffle_inputs(input_tensors, capacity=QUEUE_CAPACITY, min_after_dequeue=min_after_dequeue, num_threads=shuffle_threads)
num_enqueuing_threads -= shuffle_threads
tf.logging.info(input_tensors)
return tf.train.batch(input_tensors, batch_size=batch_size, capacity=QUEUE_CAPACITY, num_threads=num_enqueuing_threads, dynamic_pad=True, allow_smaller_final_batch=False)
| 4,064,438,629,566,444,000
|
Reads batches of SequenceExamples from TFRecords and pads them.
Can deal with variable length SequenceExamples by padding each batch to the
length of the longest sequence with zeros.
Args:
file_list: A list of paths to TFRecord files containing SequenceExamples.
batch_size: The number of SequenceExamples to include in each batch.
input_size: The size of each input vector. The returned batch of inputs
will have a shape [batch_size, num_steps, input_size].
label_shape: Shape for labels. If not specified, will use [].
num_enqueuing_threads: The number of threads to use for enqueuing
SequenceExamples.
shuffle: Whether to shuffle the batches.
Returns:
inputs: A tensor of shape [batch_size, num_steps, input_size] of float32s.
labels: A tensor of shape [batch_size, num_steps] of int64s.
lengths: A tensor of shape [batch_size] of int32s. The lengths of each
SequenceExample before padding.
Raises:
ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.
|
magenta/common/sequence_example_lib.py
|
get_padded_batch
|
KenniVelez/magenta
|
python
|
def get_padded_batch(file_list, batch_size, input_size, label_shape=None, num_enqueuing_threads=4, shuffle=False):
'Reads batches of SequenceExamples from TFRecords and pads them.\n\n Can deal with variable length SequenceExamples by padding each batch to the\n length of the longest sequence with zeros.\n\n Args:\n file_list: A list of paths to TFRecord files containing SequenceExamples.\n batch_size: The number of SequenceExamples to include in each batch.\n input_size: The size of each input vector. The returned batch of inputs\n will have a shape [batch_size, num_steps, input_size].\n label_shape: Shape for labels. If not specified, will use [].\n num_enqueuing_threads: The number of threads to use for enqueuing\n SequenceExamples.\n shuffle: Whether to shuffle the batches.\n\n Returns:\n inputs: A tensor of shape [batch_size, num_steps, input_size] of floats32s.\n labels: A tensor of shape [batch_size, num_steps] of int64s.\n lengths: A tensor of shape [batch_size] of int32s. The lengths of each\n SequenceExample before padding.\n Raises:\n ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.\n '
file_queue = tf.train.string_input_producer(file_list)
reader = tf.TFRecordReader()
(_, serialized_example) = reader.read(file_queue)
sequence_features = {'inputs': tf.FixedLenSequenceFeature(shape=[input_size], dtype=tf.float32), 'labels': tf.FixedLenSequenceFeature(shape=(label_shape or []), dtype=tf.int64)}
(_, sequence) = tf.parse_single_sequence_example(serialized_example, sequence_features=sequence_features)
length = tf.shape(sequence['inputs'])[0]
input_tensors = [sequence['inputs'], sequence['labels'], length]
if shuffle:
if (num_enqueuing_threads < 2):
raise ValueError('`num_enqueuing_threads` must be at least 2 when shuffling.')
shuffle_threads = int((math.ceil(num_enqueuing_threads) / 2.0))
min_after_dequeue = count_records(file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)
input_tensors = _shuffle_inputs(input_tensors, capacity=QUEUE_CAPACITY, min_after_dequeue=min_after_dequeue, num_threads=shuffle_threads)
num_enqueuing_threads -= shuffle_threads
tf.logging.info(input_tensors)
return tf.train.batch(input_tensors, batch_size=batch_size, capacity=QUEUE_CAPACITY, num_threads=num_enqueuing_threads, dynamic_pad=True, allow_smaller_final_batch=False)
|
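Consuming get_padded_batch requires the TF 1.x queue-runner machinery, since it builds a graph-mode input pipeline; a driver sketch under that assumption (the file path, batch size, and input size are placeholders):

import tensorflow as tf  # TF 1.x graph mode assumed

inputs, labels, lengths = get_padded_batch(
    ['/tmp/train.tfrecord'], batch_size=8, input_size=38)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    batch_inputs, batch_lengths = sess.run([inputs, lengths])
    # Each batch is zero-padded to its longest sequence.
    print(batch_inputs.shape, batch_lengths)
    coord.request_stop()
    coord.join(threads)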
def count_records(file_list, stop_at=None):
'Counts number of records in files from `file_list` up to `stop_at`.\n\n Args:\n file_list: List of TFRecord files to count records in.\n stop_at: Optional number of records to stop counting at.\n\n Returns:\n Integer number of records in files from `file_list` up to `stop_at`.\n '
num_records = 0
for tfrecord_file in file_list:
tf.logging.info('Counting records in %s.', tfrecord_file)
for _ in tf.python_io.tf_record_iterator(tfrecord_file):
num_records += 1
if (stop_at and (num_records >= stop_at)):
tf.logging.info('Number of records is at least %d.', num_records)
return num_records
tf.logging.info('Total records: %d', num_records)
return num_records
| 5,925,921,993,372,783,000
|
Counts number of records in files from `file_list` up to `stop_at`.
Args:
file_list: List of TFRecord files to count records in.
stop_at: Optional number of records to stop counting at.
Returns:
Integer number of records in files from `file_list` up to `stop_at`.
|
magenta/common/sequence_example_lib.py
|
count_records
|
KenniVelez/magenta
|
python
|
def count_records(file_list, stop_at=None):
'Counts number of records in files from `file_list` up to `stop_at`.\n\n Args:\n file_list: List of TFRecord files to count records in.\n stop_at: Optional number of records to stop counting at.\n\n Returns:\n Integer number of records in files from `file_list` up to `stop_at`.\n '
num_records = 0
for tfrecord_file in file_list:
tf.logging.info('Counting records in %s.', tfrecord_file)
for _ in tf.python_io.tf_record_iterator(tfrecord_file):
num_records += 1
if (stop_at and (num_records >= stop_at)):
tf.logging.info('Number of records is at least %d.', num_records)
return num_records
tf.logging.info('Total records: %d', num_records)
return num_records
|
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
'Flattens the batch of sequences, removing padding (if applicable).\n\n Args:\n maybe_padded_sequences: A tensor of possibly padded sequences to flatten,\n sized `[N, M, ...]` where M = max(lengths).\n lengths: Optional length of each sequence, sized `[N]`. If None, assumes no\n padding.\n\n Returns:\n flatten_maybe_padded_sequences: The flattened sequence tensor, sized\n `[sum(lengths), ...]`.\n '
def flatten_unpadded_sequences():
return tf.reshape(maybe_padded_sequences, ([(- 1)] + maybe_padded_sequences.shape.as_list()[2:]))
if (lengths is None):
return flatten_unpadded_sequences()
def flatten_padded_sequences():
indices = tf.where(tf.sequence_mask(lengths))
return tf.gather_nd(maybe_padded_sequences, indices)
return tf.cond(tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]), flatten_unpadded_sequences, flatten_padded_sequences)
| -4,121,728,141,681,414,700
|
Flattens the batch of sequences, removing padding (if applicable).
Args:
maybe_padded_sequences: A tensor of possibly padded sequences to flatten,
sized `[N, M, ...]` where M = max(lengths).
lengths: Optional length of each sequence, sized `[N]`. If None, assumes no
padding.
Returns:
flatten_maybe_padded_sequences: The flattened sequence tensor, sized
`[sum(lengths), ...]`.
|
magenta/common/sequence_example_lib.py
|
flatten_maybe_padded_sequences
|
KenniVelez/magenta
|
python
|
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
'Flattens the batch of sequences, removing padding (if applicable).\n\n Args:\n maybe_padded_sequences: A tensor of possibly padded sequences to flatten,\n sized `[N, M, ...]` where M = max(lengths).\n lengths: Optional length of each sequence, sized `[N]`. If None, assumes no\n padding.\n\n Returns:\n flatten_maybe_padded_sequences: The flattened sequence tensor, sized\n `[sum(lengths), ...]`.\n '
def flatten_unpadded_sequences():
return tf.reshape(maybe_padded_sequences, ([(- 1)] + maybe_padded_sequences.shape.as_list()[2:]))
if (lengths is None):
return flatten_unpadded_sequences()
def flatten_padded_sequences():
indices = tf.where(tf.sequence_mask(lengths))
return tf.gather_nd(maybe_padded_sequences, indices)
return tf.cond(tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]), flatten_unpadded_sequences, flatten_padded_sequences)
|
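The padded path of flatten_maybe_padded_sequences is easiest to see numerically; this numpy sketch mirrors what the tf.sequence_mask / tf.gather_nd combination produces (it is an illustration, not the TF code):

import numpy as np

padded = np.array([[[1, 1], [2, 2], [0, 0]],
                   [[3, 3], [0, 0], [0, 0]]])  # [N=2, M=3, ...], zero-padded
lengths = np.array([2, 1])

mask = np.arange(padded.shape[1]) < lengths[:, None]  # [N, M] validity mask
flat = padded[mask]                                   # [sum(lengths), ...]
print(flat)  # [[1 1] [2 2] [3 3]]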
def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:
'Decode locations from predictions using priors to undo the encoding we did for offset regression at train\n time.\n\n Args:\n loc:location predictions for loc layers. Shape: [num_priors,4].\n priors: Prior boxes in center-offset form. Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes.\n\n Return:\n Tensor containing decoded bounding box predictions.\n '
boxes = torch.cat(((priors[:, 0:2] + ((loc[:, 0:2] * variances[0]) * priors[:, 2:4])), (priors[:, 2:4] * torch.exp((loc[:, 2:4] * variances[1]))), (priors[:, 0:2] + ((loc[:, 4:6] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 6:8] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 8:10] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 10:12] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 12:14] * variances[0]) * priors[:, 2:4]))), 1)
tmp = (boxes[:, 0:2] - (boxes[:, 2:4] / 2))
return torch.cat((tmp, (boxes[:, 2:4] + tmp), boxes[:, 4:]), dim=(- 1))
| 9,088,783,656,471,098,000
|
Decode locations from predictions using priors to undo the encoding we did for offset regression at train
time.
Args:
loc:location predictions for loc layers. Shape: [num_priors,4].
priors: Prior boxes in center-offset form. Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes.
Return:
Tensor containing decoded bounding box predictions.
|
kornia/contrib/face_detection.py
|
_decode
|
Abdelrhman-Hosny/kornia
|
python
|
def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:
'Decode locations from predictions using priors to undo the encoding we did for offset regression at train\n time.\n\n Args:\n loc:location predictions for loc layers. Shape: [num_priors,4].\n priors: Prior boxes in center-offset form. Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes.\n\n Return:\n Tensor containing decoded bounding box predictions.\n '
boxes = torch.cat(((priors[:, 0:2] + ((loc[:, 0:2] * variances[0]) * priors[:, 2:4])), (priors[:, 2:4] * torch.exp((loc[:, 2:4] * variances[1]))), (priors[:, 0:2] + ((loc[:, 4:6] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 6:8] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 8:10] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 10:12] * variances[0]) * priors[:, 2:4])), (priors[:, 0:2] + ((loc[:, 12:14] * variances[0]) * priors[:, 2:4]))), 1)
tmp = (boxes[:, 0:2] - (boxes[:, 2:4] / 2))
return torch.cat((tmp, (boxes[:, 2:4] + tmp), boxes[:, 4:]), dim=(- 1))
|
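The box half of _decode is the standard SSD-style transform: the center shifts by loc * variance * prior size and the size scales by exp(loc * variance), after which the center/size box is converted to corners. A single-prior check with the ten keypoint columns zeroed:

import torch

prior = torch.tensor([[0.5, 0.5, 0.2, 0.2]])     # cx, cy, w, h
loc = torch.zeros(1, 14)
loc[0, :4] = torch.tensor([1.0, 0.0, 0.0, 0.0])  # shift cx by 1 * 0.1 * 0.2
variances = [0.1, 0.2]

out = _decode(loc, prior, variances)
# cx = 0.5 + 0.02 = 0.52, w = 0.2 * exp(0) = 0.2
# xmin = 0.52 - 0.1 = 0.42, xmax = 0.42 + 0.2 = 0.62
print(out[0, :4])  # tensor([0.4200, 0.4000, 0.6200, 0.6000])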
def to(self, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> 'FaceDetectorResult':
'Like :func:`torch.nn.Module.to()` method.'
self._data = self._data.to(device=device, dtype=dtype)
return self
| -3,692,803,267,318,518,300
|
Like :func:`torch.nn.Module.to()` method.
|
kornia/contrib/face_detection.py
|
to
|
Abdelrhman-Hosny/kornia
|
python
|
def to(self, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> 'FaceDetectorResult':
self._data = self._data.to(device=device, dtype=dtype)
return self
|
@property
def xmin(self) -> torch.Tensor:
'The bounding box top-left x-coordinate.'
return self._data[(..., 0)]
| -3,581,150,055,712,332,000
|
The bounding box top-left x-coordinate.
|
kornia/contrib/face_detection.py
|
xmin
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def xmin(self) -> torch.Tensor:
return self._data[(..., 0)]
|
@property
def ymin(self) -> torch.Tensor:
'The bounding box top-left y-coordinate.'
return self._data[(..., 1)]
| 8,193,021,596,356,398,000
|
The bounding box top-left y-coordinate.
|
kornia/contrib/face_detection.py
|
ymin
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def ymin(self) -> torch.Tensor:
return self._data[(..., 1)]
|
@property
def xmax(self) -> torch.Tensor:
'The bounding box bottom-right x-coordinate.'
return self._data[(..., 2)]
| 3,209,420,580,495,309,000
|
The bounding box bottom-right x-coordinate.
|
kornia/contrib/face_detection.py
|
xmax
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def xmax(self) -> torch.Tensor:
return self._data[(..., 2)]
|
@property
def ymax(self) -> torch.Tensor:
'The bounding box bottom-right y-coordinate.'
return self._data[(..., 3)]
| 9,078,629,932,612,555,000
|
The bounding box bottom-right y-coordinate.
|
kornia/contrib/face_detection.py
|
ymax
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def ymax(self) -> torch.Tensor:
return self._data[(..., 3)]
|
def get_keypoint(self, keypoint: FaceKeypoint) -> torch.Tensor:
'The [x y] position of a given facial keypoint.\n\n Args:\n keypoint: the keypoint type to return the position.\n '
if (keypoint == FaceKeypoint.EYE_LEFT):
out = self._data[(..., (4, 5))]
elif (keypoint == FaceKeypoint.EYE_RIGHT):
out = self._data[(..., (6, 7))]
elif (keypoint == FaceKeypoint.NOSE):
out = self._data[(..., (8, 9))]
elif (keypoint == FaceKeypoint.MOUTH_LEFT):
out = self._data[(..., (10, 11))]
elif (keypoint == FaceKeypoint.MOUTH_RIGHT):
out = self._data[(..., (12, 13))]
else:
raise ValueError(f'Not valid keypoint type. Got: {keypoint}.')
return out
| 5,815,797,914,079,903,000
|
The [x y] position of a given facial keypoint.
Args:
keypoint: the keypoint type to return the position.
|
kornia/contrib/face_detection.py
|
get_keypoint
|
Abdelrhman-Hosny/kornia
|
python
|
def get_keypoint(self, keypoint: FaceKeypoint) -> torch.Tensor:
'The [x y] position of a given facial keypoint.\n\n Args:\n keypoint: the keypoint type to return the position.\n '
if (keypoint == FaceKeypoint.EYE_LEFT):
out = self._data[(..., (4, 5))]
elif (keypoint == FaceKeypoint.EYE_RIGHT):
out = self._data[(..., (6, 7))]
elif (keypoint == FaceKeypoint.NOSE):
out = self._data[(..., (8, 9))]
elif (keypoint == FaceKeypoint.MOUTH_LEFT):
out = self._data[(..., (10, 11))]
elif (keypoint == FaceKeypoint.MOUTH_RIGHT):
out = self._data[(..., (12, 13))]
else:
raise ValueError(f'Not valid keypoint type. Got: {keypoint}.')
return out
|
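A usage sketch against the 15-column layout these accessors assume ([xmin, ymin, xmax, ymax, five keypoint (x, y) pairs, score]); wrapping the raw tensor in the constructor is my assumption, since __init__ is not shown here:

import torch

data = torch.zeros(1, 15)
data[0, :4] = torch.tensor([10.0, 20.0, 50.0, 60.0])  # box corners
data[0, 8:10] = torch.tensor([30.0, 35.0])            # nose (x, y)
res = FaceDetectorResult(data)                        # constructor assumed

print(res.get_keypoint(FaceKeypoint.NOSE))  # tensor([[30., 35.]])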
@property
def score(self) -> torch.Tensor:
'The detection score.'
return self._data[(..., 14)]
| -1,282,487,293,321,369,600
|
The detection score.
|
kornia/contrib/face_detection.py
|
score
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def score(self) -> torch.Tensor:
return self._data[(..., 14)]
|
@property
def width(self) -> torch.Tensor:
'The bounding box width.'
return (self.xmax - self.xmin)
| -3,775,788,693,311,651,300
|
The bounding box width.
|
kornia/contrib/face_detection.py
|
width
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def width(self) -> torch.Tensor:
return (self.xmax - self.xmin)
|
@property
def height(self) -> torch.Tensor:
'The bounding box height.'
return (self.ymax - self.ymin)
| 1,337,078,370,723,638,500
|
The bounding box height.
|
kornia/contrib/face_detection.py
|
height
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def height(self) -> torch.Tensor:
return (self.ymax - self.ymin)
|
@property
def top_left(self) -> torch.Tensor:
'The [x y] position of the top-left coordinate of the bounding box.'
return self._data[(..., (0, 1))]
| 8,133,284,690,489,061,000
|
The [x y] position of the top-left coordinate of the bounding box.
|
kornia/contrib/face_detection.py
|
top_left
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def top_left(self) -> torch.Tensor:
return self._data[(..., (0, 1))]
|
@property
def top_right(self) -> torch.Tensor:
'The [x y] position of the top-left coordinate of the bounding box.'
out = self.top_left
out[(..., 0)] += self.width
return out
| -266,048,192,071,190,720
|
The [x y] position of the top-left coordinate of the bounding box.
|
kornia/contrib/face_detection.py
|
top_right
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def top_right(self) -> torch.Tensor:
out = self.top_left
out[(..., 0)] += self.width
return out
|
@property
def bottom_right(self) -> torch.Tensor:
'The [x y] position of the bottom-right coordinate of the bounding box.'
return self._data[(..., (2, 3))]
| 1,580,686,018,896,368,400
|
The [x y] position of the bottom-right coordinate of the bounding box.
|
kornia/contrib/face_detection.py
|
bottom_right
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def bottom_right(self) -> torch.Tensor:
return self._data[(..., (2, 3))]
|
@property
def bottom_left(self) -> torch.Tensor:
'The [x y] position of the top-left coordinate of the bounding box.'
out = self.top_left
out[(..., 1)] += self.height
return out
| -7,967,264,993,067,659,000
|
The [x y] position of the top-left coordinate of the bounding box.
|
kornia/contrib/face_detection.py
|
bottom_left
|
Abdelrhman-Hosny/kornia
|
python
|
@property
def bottom_left(self) -> torch.Tensor:
out = self.top_left
out[(..., 1)] += self.height
return out
|
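And the derived geometry on the same layout; top_right and bottom_left are computed from top_left plus width/height rather than stored (constructor usage again assumed):

import torch

res = FaceDetectorResult(torch.tensor([[10., 20., 50., 60.] + [0.] * 11]))
print(res.width, res.height)  # tensor([40.]) tensor([40.])
print(res.top_right)          # tensor([[50., 20.]])
print(res.bottom_left)        # tensor([[10., 60.]])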