code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Copyright (c) 2020, <NAME>. All rights reserved.
#
# This work is made available under the CC BY-NC-SA 4.0.
# To view a copy of this license, see LICENSE
import numpy as np
import scipy.ndimage
import PIL.Image
def create_perspective_transform_matrix(src, dst):
    """ Creates a perspective transformation matrix which transforms points
        in quadrilateral ``src`` to the corresponding points on quadrilateral
        ``dst``.

        Parameters
        ----------
        src, dst : iterable of four (x, y) pairs
            Source and destination quadrilateral corners, in matching order.

        Returns
        -------
        numpy.ndarray
            The 3x3 homogeneous perspective (homography) matrix.

        Will raise a ``np.linalg.LinAlgError`` on invalid (degenerate) input.
    """
    # See:
    # * http://xenia.media.mit.edu/~cwren/interpolator/
    # * http://stackoverflow.com/a/14178717/71522
    # Build the standard 8x8 homography system: two rows per point pair.
    in_matrix = []
    for (x, y), (X, Y) in zip(src, dst):
        in_matrix.extend([
            [x, y, 1, 0, 0, 0, -X * x, -X * y],
            [0, 0, 0, x, y, 1, -Y * x, -Y * y],
        ])
    # ``np.float`` was removed from NumPy (1.24+) and ``np.matrix`` is
    # discouraged; use plain arrays with the builtin ``float`` dtype and
    # ``np.linalg.solve`` on the same normal equations as before (solve
    # raises LinAlgError on singular systems, just like the old ``inv``).
    A = np.array(in_matrix, dtype=float)
    B = np.array(dst, dtype=float).reshape(8)
    af = np.linalg.solve(A.T @ A, A.T @ B)
    # Append the fixed scale term (h33 = 1) and reshape to the 3x3 matrix.
    return np.append(af, 1).reshape((3, 3))
def create_perspective_transform(src, dst, round=False, splat_args=False):
    """ Returns a function which will transform points in quadrilateral
        ``src`` to the corresponding points on quadrilateral ``dst``::

            >>> transform = create_perspective_transform(
            ...     [(0, 0), (10, 0), (10, 10), (0, 10)],
            ...     [(50, 50), (100, 50), (100, 100), (50, 100)],
            ... )
            >>> transform((5, 5))
            (74.99999999999639, 74.999999999999957)

        If ``round`` is ``True`` then points will be rounded to the nearest
        integer and integer values will be returned.

        If ``splat_args`` is ``True`` the returned function accepts two
        positional arguments (x, y) instead of a single ``(x, y)`` tuple.

        If the input values yield an invalid transformation matrix an
        identity function is returned and its ``error`` attribute is set to
        a description of the error; otherwise ``error`` is ``None``. The
        transformation matrix itself is exposed on the ``matrix`` attribute.
    """
    try:
        transform_matrix = create_perspective_transform_matrix(src, dst)
        error = None
    except np.linalg.LinAlgError as e:
        # Degenerate quads: fall back to the identity transform and record
        # why. ``np.float`` was removed from NumPy; the builtin ``float`` is
        # the equivalent dtype.
        transform_matrix = np.identity(3, dtype=float)
        error = "invalid input quads (%s and %s): %s" % (src, dst, e)
        error = error.replace("\n", "")

    # The ``round`` parameter shadows the builtin; capture it under another
    # name so the closure below reads unambiguously. ``np.rint`` matches the
    # builtin ``round`` (both round half to even).
    do_round = round

    def _apply(pt):
        # Homogeneous multiply, then normalise by the projective coordinate.
        res = np.dot(transform_matrix, ((pt[0],), (pt[1],), (1,)))
        res = res / res[2]
        if do_round:
            return (int(np.rint(res[0][0])), int(np.rint(res[1][0])))
        return (res[0][0], res[1][0])

    # Previously this generated the wrapper source and ran it through
    # ``exec``; plain closures behave identically and avoid ``exec`` and the
    # ``locals``-builtin shadowing entirely.
    if splat_args:
        def perspective_transform(*pt):
            return _apply(pt)
    else:
        def perspective_transform(pt):
            return _apply(pt)

    perspective_transform.matrix = transform_matrix
    perspective_transform.error = error
    return perspective_transform
def align_mesh2stylegan(temp_tcoords, transformation_params):
    """Map mesh texture coordinates into the aligned (StyleGAN) image frame.

    Applies the same crop, pad and perspective (quad) warp that was applied
    to the image by ``align_im2stylegan``, then normalises the result to
    [0, 1] texture coordinates.

    Parameters
    ----------
    temp_tcoords : np.ndarray
        (N, 2) coordinate array; column 1 appears to be the horizontal axis
        and column 0 the vertical one -- NOTE(review): confirm axis order
        against the caller.
    transformation_params : dict
        Dict with 'crop', 'pad' and 'quad' entries as produced by
        ``align_im2stylegan``.

    Returns
    -------
    np.ndarray
        (N, 2) normalised coordinates with columns reversed and the second
        column flipped (1 - v).
    """
    # Work on a copy so the caller's array is not mutated in place.
    temp_tcoords = temp_tcoords.copy()
    # Undo the crop offset; note the swapped index order ('crop' stores
    # (top, left, ...) while columns are (col0, col1)).
    temp_tcoords[:, 0] = temp_tcoords[:, 0] - transformation_params['crop'][1]
    temp_tcoords[:, 1] = temp_tcoords[:, 1] - transformation_params['crop'][0]
    # Apply the padding offset added during alignment.
    temp_tcoords[:, 0] = temp_tcoords[:, 0] + transformation_params['pad'][1]
    temp_tcoords[:, 1] = temp_tcoords[:, 1] + transformation_params['pad'][0]
    # Target square size of the perspective warp (hard-coded to the
    # transform_size used at alignment time).
    h, w = (4096, 4096) # transformation_params['new_size']
    # Build the same quad -> square warp the image went through.
    transform = create_perspective_transform(
        transformation_params['quad'],
        [(0, 0), (0, h), (h, w), (w, 0)],
        splat_args=True,
    )
    # Warp each point; the (1, 0) ordering swaps axes into the transform's
    # expected (x, y) convention and back.
    for i in range(len(temp_tcoords)):
        temp_tcoords[i, 1], temp_tcoords[i, 0] = transform(temp_tcoords[i, 1], temp_tcoords[i, 0])
    # Reverse columns and normalise to [0, 1] texture coordinates.
    new_tcoords = temp_tcoords[:, ::-1] / (h, w) # transformation_params['new_size']
    # Flip the vertical texture axis (image origin is top-left, UV is
    # bottom-left).
    new_tcoords[:, 1] = 1 - new_tcoords[:, 1]
    return new_tcoords
def align_im2stylegan(src_im, src_mask, face_landmarks, output_size=1024, transform_size=4096,
                      enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):
    """Align a face image (and its mask) exactly as in FFHQ preprocessing.

    Chooses an oriented crop rectangle from the eye/mouth landmarks,
    optionally shrinks, crops and pads the image, warps the rectangle to a
    ``transform_size`` square and finally resizes to ``output_size``.

    Parameters
    ----------
    src_im : PIL.Image.Image
        Source image.
    src_mask : PIL.Image.Image
        Mask; converted to 'L' and attached as the image's alpha channel so
        all geometric operations hit image and mask in lockstep.
    face_landmarks : sequence
        68 (x, y) facial landmarks in dlib ordering (chin 0-16, eyes 36-47,
        mouth 48-67, ...).
    output_size, transform_size : int
        Final output resolution and intermediate warp resolution.
    enable_padding : bool
        Pad with a blurred/median-blended border when the crop quad extends
        past the image.
    x_scale, y_scale, em_scale : float
        Scale factors for the crop rectangle and the eye-to-mouth offset.
    alpha : bool
        NOTE(review): both branches currently build the same RGBA image;
        confirm whether the ``alpha=False`` branch should drop the mask
        channel instead.

    Returns
    -------
    (img, aligned_mask, transformation_params)
        Aligned image, aligned mask, and the dict of geometric parameters
        ('rsize', 'crop', 'pad', 'quad', 'new_size') needed to push other
        coordinates through the same alignment (see ``align_mesh2stylegan``).
    """
    # Align function from FFHQ dataset pre-processing step
    # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
    lm = np.array(face_landmarks)
    lm_chin = lm[0: 17]  # left-right
    lm_eyebrow_left = lm[17: 22]  # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]  # top-down
    lm_nostrils = lm[31: 36]  # top-down
    lm_eye_left = lm[36: 42]  # left-clockwise
    lm_eye_right = lm[42: 48]  # left-clockwise
    lm_mouth_outer = lm[48: 60]  # left-clockwise
    lm_mouth_inner = lm[60: 68]  # left-clockwise
    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg
    # Choose oriented crop rectangle.
    # x becomes the (face-aligned) horizontal half-axis; its norm fixes the
    # crop size from the larger of the eye-to-eye / eye-to-mouth distances.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    x *= x_scale
    y = np.flipud(x) * [-y_scale, y_scale]
    # Crop centre, shifted from the eye midpoint toward the mouth.
    c = eye_avg + eye_to_mouth * em_scale
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2
    rsize = None
    img = src_im.convert('RGBA').convert('RGB')
    img_mask = src_mask.convert('L')
    img.putalpha(img_mask)
    # Shrink.
    # Pre-downscale very large images for speed; quad/qsize shrink with it.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
        # Pillow needs Image.LANCZOS / Image.Resampling.LANCZOS.
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink
    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]
    # Pad.
    # Amount by which the quad still sticks out of the (cropped) image.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'constant')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        # Blend the padded border: blur + median fill, feathered by a mask
        # that ramps up toward the image edges.
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                         1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = np.uint8(np.clip(np.rint(img), 0, 255))
        if alpha:
            mask = 1 - np.clip(3.0 * mask, 0.0, 1.0)
            mask = np.uint8(np.clip(np.rint(mask * 255), 0, 255))
            img = np.concatenate((img, mask), axis=2)
            img = PIL.Image.fromarray(img, 'RGBA')
        else:
            img = PIL.Image.fromarray(img, 'RGBA')
        quad += pad[:2]
    # Transform.
    # Split mask (channel 3) from RGB and warp the oriented quad onto an
    # axis-aligned square of transform_size.
    aligned_mask = PIL.Image.fromarray(np.uint8(img)[:, :, 3])
    img = PIL.Image.fromarray(np.uint8(img)[:, :, :3])
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(),
                        PIL.Image.BILINEAR)
    aligned_mask = aligned_mask.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(),
                                          PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
        aligned_mask = aligned_mask.resize((output_size, output_size), PIL.Image.ANTIALIAS)
    transformation_params = {
        'rsize': rsize,
        'crop': crop,
        'pad': pad,
        'quad': quad + 0.5,
        'new_size': (output_size, output_size)
    }
    # Save aligned image.
    return img, aligned_mask, transformation_params
| [
"numpy.identity",
"numpy.mean",
"numpy.clip",
"numpy.uint8",
"numpy.median",
"numpy.flipud",
"numpy.floor",
"numpy.stack",
"numpy.array",
"numpy.linalg.inv",
"numpy.rint",
"numpy.concatenate",
"numpy.hypot",
"numpy.matrix",
"numpy.float32"
] | [((835, 871), 'numpy.matrix', 'np.matrix', (['in_matrix'], {'dtype': 'np.float'}), '(in_matrix, dtype=np.float)\n', (844, 871), True, 'import numpy as np\n'), ((5077, 5101), 'numpy.array', 'np.array', (['face_landmarks'], {}), '(face_landmarks)\n', (5085, 5101), True, 'import numpy as np\n'), ((5603, 5631), 'numpy.mean', 'np.mean', (['lm_eye_left'], {'axis': '(0)'}), '(lm_eye_left, axis=0)\n', (5610, 5631), True, 'import numpy as np\n'), ((5652, 5681), 'numpy.mean', 'np.mean', (['lm_eye_right'], {'axis': '(0)'}), '(lm_eye_right, axis=0)\n', (5659, 5681), True, 'import numpy as np\n'), ((6061, 6073), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (6069, 6073), True, 'import numpy as np\n'), ((6280, 6334), 'numpy.stack', 'np.stack', (['[c - x - y, c - x + y, c + x + y, c + x - y]'], {}), '([c - x - y, c - x + y, c + x + y, c + x - y])\n', (6288, 6334), True, 'import numpy as np\n'), ((6184, 6196), 'numpy.flipud', 'np.flipud', (['x'], {}), '(x)\n', (6193, 6196), True, 'import numpy as np\n'), ((6351, 6363), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (6359, 6363), True, 'import numpy as np\n'), ((6556, 6591), 'numpy.floor', 'np.floor', (['(qsize / output_size * 0.5)'], {}), '(qsize / output_size * 0.5)\n', (6564, 6591), True, 'import numpy as np\n'), ((880, 893), 'numpy.array', 'np.array', (['dst'], {}), '(dst)\n', (888, 893), True, 'import numpy as np\n'), ((921, 943), 'numpy.linalg.inv', 'np.linalg.inv', (['(A.T * A)'], {}), '(A.T * A)\n', (934, 943), True, 'import numpy as np\n'), ((3044, 3074), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (3055, 3074), True, 'import numpy as np\n'), ((6014, 6037), 'numpy.flipud', 'np.flipud', (['eye_to_mouth'], {}), '(eye_to_mouth)\n', (6023, 6037), True, 'import numpy as np\n'), ((6091, 6112), 'numpy.hypot', 'np.hypot', (['*eye_to_eye'], {}), '(*eye_to_eye)\n', (6099, 6112), True, 'import numpy as np\n'), ((6120, 6143), 'numpy.hypot', 'np.hypot', (['*eye_to_mouth'], 
{}), '(*eye_to_mouth)\n', (6128, 6143), True, 'import numpy as np\n'), ((6877, 6897), 'numpy.rint', 'np.rint', (['(qsize * 0.1)'], {}), '(qsize * 0.1)\n', (6884, 6897), True, 'import numpy as np\n'), ((7830, 7845), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (7840, 7845), True, 'import numpy as np\n'), ((8301, 8336), 'numpy.clip', 'np.clip', (['(mask * 3.0 + 1.0)', '(0.0)', '(1.0)'], {}), '(mask * 3.0 + 1.0, 0.0, 1.0)\n', (8308, 8336), True, 'import numpy as np\n'), ((8394, 8417), 'numpy.clip', 'np.clip', (['mask', '(0.0)', '(1.0)'], {}), '(mask, 0.0, 1.0)\n', (8401, 8417), True, 'import numpy as np\n'), ((8647, 8682), 'numpy.concatenate', 'np.concatenate', (['(img, mask)'], {'axis': '(2)'}), '((img, mask), axis=2)\n', (8661, 8682), True, 'import numpy as np\n'), ((8904, 8917), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (8912, 8917), True, 'import numpy as np\n'), ((8962, 8975), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (8970, 8975), True, 'import numpy as np\n'), ((7782, 7802), 'numpy.rint', 'np.rint', (['(qsize * 0.3)'], {}), '(qsize * 0.3)\n', (7789, 7802), True, 'import numpy as np\n'), ((8357, 8384), 'numpy.median', 'np.median', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (8366, 8384), True, 'import numpy as np\n'), ((8453, 8465), 'numpy.rint', 'np.rint', (['img'], {}), '(img)\n', (8460, 8465), True, 'import numpy as np\n'), ((8525, 8554), 'numpy.clip', 'np.clip', (['(3.0 * mask)', '(0.0)', '(1.0)'], {}), '(3.0 * mask, 0.0, 1.0)\n', (8532, 8554), True, 'import numpy as np\n'), ((8595, 8614), 'numpy.rint', 'np.rint', (['(mask * 255)'], {}), '(mask * 255)\n', (8602, 8614), True, 'import numpy as np\n'), ((975, 987), 'numpy.array', 'np.array', (['af'], {}), '(af)\n', (983, 987), True, 'import numpy as np\n'), ((8027, 8040), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (8037, 8040), True, 'import numpy as np\n'), ((8051, 8072), 'numpy.float32', 'np.float32', (['(w - 1 - x)'], {}), '(w - 1 - x)\n', (8061, 8072), 
True, 'import numpy as np\n'), ((8131, 8144), 'numpy.float32', 'np.float32', (['y'], {}), '(y)\n', (8141, 8144), True, 'import numpy as np\n'), ((8155, 8176), 'numpy.float32', 'np.float32', (['(h - 1 - y)'], {}), '(h - 1 - y)\n', (8165, 8176), True, 'import numpy as np\n')] |
#
# Unary operator classes and methods
#
import numbers
import numpy as np
import pybamm
from scipy.sparse import csr_matrix
class Broadcast(pybamm.SpatialOperator):
    """A node in the expression tree representing a broadcasting operator.
    Broadcasts a child to a specified domain. After discretisation, this will evaluate
    to an array of the right shape for the specified domain.
    For an example of broadcasts in action, see
    `this example notebook
    <https://github.com/pybamm-team/PyBaMM/blob/master/examples/notebooks/expression_tree/broadcasts.ipynb>`_
    Parameters
    ----------
    child : :class:`Symbol`
        child node
    broadcast_domain : iterable of str
        Primary domain for broadcast. This will become the domain of the symbol
    broadcast_auxiliary_domains : dict of str
        Auxiliary domains for broadcast.
    broadcast_type : str, optional
        Whether to broadcast to the full domain (primary and secondary) or only in the
        primary direction. Default is "full".
    name : str
        name of the node
    **Extends:** :class:`SpatialOperator`
    """

    def __init__(
        self,
        child,
        broadcast_domain,
        broadcast_auxiliary_domains=None,
        broadcast_type="full to nodes",
        name=None,
    ):
        # Convert child to scalar if it is a number
        if isinstance(child, numbers.Number):
            child = pybamm.Scalar(child)
        # Convert domain to list if it's a string
        if isinstance(broadcast_domain, str):
            broadcast_domain = [broadcast_domain]
        if name is None:
            name = "broadcast"
        # perform some basic checks and set attributes
        # (check_and_set_domains is supplied by each subclass and raises
        # pybamm.DomainError for unsupported source/target domain pairs)
        domain, auxiliary_domains = self.check_and_set_domains(
            child, broadcast_type, broadcast_domain, broadcast_auxiliary_domains
        )
        # NOTE(review): the broadcast attributes are assigned *before*
        # calling the base initialiser -- preserve this ordering.
        self.broadcast_type = broadcast_type
        self.broadcast_domain = broadcast_domain
        super().__init__(name, child, domain, auxiliary_domains)

    def _unary_simplify(self, simplified_child):
        """ See :meth:`pybamm.UnaryOperator.simplify()`. """
        # Simplifying a broadcast just re-broadcasts the simplified child.
        return self._unary_new_copy(simplified_child)
class PrimaryBroadcast(Broadcast):
    """A node in the expression tree representing a primary broadcasting operator.
    Broadcasts in a `primary` dimension only. That is, makes explicit copies of the
    symbol in the domain specified by `broadcast_domain`. This should be used for
    broadcasting from a "larger" scale to a "smaller" scale, for example broadcasting
    temperature T(x) from the electrode to the particles, or broadcasting current
    collector current i(y, z) from the current collector to the electrodes.
    Parameters
    ----------
    child : :class:`Symbol`
        child node
    broadcast_domain : iterable of str
        Primary domain for broadcast. This will become the domain of the symbol
    name : str
        name of the node
    **Extends:** :class:`SpatialOperator`
    """

    def __init__(self, child, broadcast_domain, name=None):
        super().__init__(
            child, broadcast_domain, broadcast_type="primary to nodes", name=name
        )

    def check_and_set_domains(
        self, child, broadcast_type, broadcast_domain, broadcast_auxiliary_domains
    ):
        "See :meth:`Broadcast.check_and_set_domains`"
        # Can only do primary broadcast from current collector to electrode or particle
        # or from electrode to particle. Note current collector to particle *is* allowed
        if child.domain == []:
            # A domain-less child may be broadcast anywhere.
            pass
        elif child.domain == ["current collector"] and broadcast_domain[0] not in [
            "negative electrode",
            "separator",
            "positive electrode",
            "negative particle",
            "positive particle",
        ]:
            raise pybamm.DomainError(
                """Primary broadcast from current collector domain must be to electrode
                or separator or particle domains"""
            )
        elif (
            child.domain[0]
            in [
                "negative electrode",
                "separator",
                "positive electrode",
            ]
            and broadcast_domain[0] not in ["negative particle", "positive particle"]
        ):
            raise pybamm.DomainError(
                """Primary broadcast from electrode or separator must be to particle
                domains"""
            )
        elif child.domain[0] in ["negative particle", "positive particle"]:
            # Particles are the smallest scale; nothing to broadcast into.
            raise pybamm.DomainError("Cannot do primary broadcast from particle domain")
        domain = broadcast_domain
        # The child's domain is pushed down one level: it becomes secondary,
        # and its old secondary (if any) becomes tertiary.
        auxiliary_domains = {}
        if child.domain != []:
            auxiliary_domains["secondary"] = child.domain
        if "secondary" in child.auxiliary_domains:
            auxiliary_domains["tertiary"] = child.auxiliary_domains["secondary"]
        return domain, auxiliary_domains

    def _unary_new_copy(self, child):
        """ See :meth:`pybamm.UnaryOperator._unary_new_copy()`. """
        return self.__class__(child, self.broadcast_domain)

    def _evaluate_for_shape(self):
        """
        Returns a vector of NaNs to represent the shape of a Broadcast.
        See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`
        """
        child_eval = self.children[0].evaluate_for_shape()
        vec = pybamm.evaluate_for_shape_using_domain(self.domain)
        # Outer product replicates the child across the broadcast domain.
        return np.outer(child_eval, vec).reshape(-1, 1)
class PrimaryBroadcastToEdges(PrimaryBroadcast):
    """A primary broadcast onto the edges (rather than the nodes) of the
    broadcast domain.

    **Extends:** :class:`PrimaryBroadcast`
    """

    def __init__(self, child, broadcast_domain, name=None):
        if not name:
            name = "broadcast to edges"
        super().__init__(child, broadcast_domain, name)
        # The parent initialiser sets "primary to nodes"; switch to edges.
        self.broadcast_type = "primary to edges"

    def evaluates_on_edges(self, dimension):
        """A broadcast to edges evaluates on edges in every dimension."""
        return True
class SecondaryBroadcast(Broadcast):
    """A node in the expression tree representing a secondary broadcasting operator.
    Broadcasts in a `secondary` dimension only. That is, makes explicit copies of the
    symbol in the domain specified by `broadcast_domain`. This should be used for
    broadcasting from a "smaller" scale to a "larger" scale, for example broadcasting
    SPM particle concentrations c_s(r) from the particles to the electrodes. Note that
    this wouldn't be used to broadcast particle concentrations in the DFN, since these
    already depend on both x and r.
    Parameters
    ----------
    child : :class:`Symbol`
        child node
    broadcast_domain : iterable of str
        Secondary domain for broadcast. The child's domain remains the primary
        domain of the symbol.
    name : str
        name of the node
    **Extends:** :class:`SpatialOperator`
    """

    def __init__(self, child, broadcast_domain, name=None):
        super().__init__(
            child, broadcast_domain, broadcast_type="secondary to nodes", name=name
        )

    def check_and_set_domains(
        self, child, broadcast_type, broadcast_domain, broadcast_auxiliary_domains
    ):
        "See :meth:`Broadcast.check_and_set_domains`"
        if child.domain == []:
            raise TypeError(
                "Cannot take SecondaryBroadcast of an object with empty domain. "
                "Use PrimaryBroadcast instead."
            )
        # Can only do secondary broadcast from particle to electrode or from
        # electrode to current collector
        if child.domain[0] in [
            "negative particle",
            "positive particle",
        ] and broadcast_domain[0] not in [
            "negative electrode",
            "separator",
            "positive electrode",
        ]:
            raise pybamm.DomainError(
                """Secondary broadcast from particle domain must be to electrode or
                separator domains"""
            )
        elif (
            child.domain[0]
            in [
                "negative electrode",
                "separator",
                "positive electrode",
            ]
            and broadcast_domain != ["current collector"]
        ):
            raise pybamm.DomainError(
                """Secondary broadcast from electrode or separator must be to
                current collector domains"""
            )
        elif child.domain == ["current collector"]:
            # Current collector is the largest scale; nothing above it.
            raise pybamm.DomainError(
                "Cannot do secondary broadcast from current collector domain"
            )
        # Domain stays the same as child domain and broadcast domain is secondary
        # domain
        domain = child.domain
        auxiliary_domains = {"secondary": broadcast_domain}
        # Child's secondary domain becomes tertiary domain
        if "secondary" in child.auxiliary_domains:
            auxiliary_domains["tertiary"] = child.auxiliary_domains["secondary"]
        return domain, auxiliary_domains

    def _unary_new_copy(self, child):
        """ See :meth:`pybamm.UnaryOperator._unary_new_copy()`. """
        return SecondaryBroadcast(child, self.broadcast_domain)

    def _evaluate_for_shape(self):
        """
        Returns a vector of NaNs to represent the shape of a Broadcast.
        See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`
        """
        child_eval = self.children[0].evaluate_for_shape()
        vec = pybamm.evaluate_for_shape_using_domain(self.domain)
        # Outer product order is (vec, child) here -- the secondary dimension
        # varies slowest (compare PrimaryBroadcast, which uses (child, vec)).
        return np.outer(vec, child_eval).reshape(-1, 1)
class SecondaryBroadcastToEdges(SecondaryBroadcast):
    """A secondary broadcast onto the edges (rather than the nodes) of the
    broadcast domain.

    **Extends:** :class:`SecondaryBroadcast`
    """

    def __init__(self, child, broadcast_domain, name=None):
        if not name:
            name = "broadcast to edges"
        super().__init__(child, broadcast_domain, name)
        # The parent initialiser sets "secondary to nodes"; switch to edges.
        self.broadcast_type = "secondary to edges"

    def evaluates_on_edges(self, dimension):
        """A broadcast to edges evaluates on edges in every dimension."""
        return True
class FullBroadcast(Broadcast):
    """A broadcast to a full domain: both a primary domain and (optionally)
    auxiliary domains are supplied explicitly.

    Parameters
    ----------
    child : :class:`Symbol`
        child node
    broadcast_domain : iterable of str
        Primary domain for broadcast. This will become the domain of the symbol
    auxiliary_domains : dict of str, or str
        Auxiliary domains for the broadcast; a bare string is treated as the
        secondary domain.
    name : str
        name of the node

    **Extends:** :class:`Broadcast`
    """

    def __init__(self, child, broadcast_domain, auxiliary_domains, name=None):
        # Allow a bare string as shorthand for the secondary domain.
        if isinstance(auxiliary_domains, str):
            auxiliary_domains = {"secondary": auxiliary_domains}
        super().__init__(
            child,
            broadcast_domain,
            broadcast_auxiliary_domains=auxiliary_domains,
            broadcast_type="full to nodes",
            name=name,
        )

    def check_and_set_domains(
        self, child, broadcast_type, broadcast_domain, broadcast_auxiliary_domains
    ):
        "See :meth:`Broadcast.check_and_set_domains`"
        # Variables on the current collector can only be broadcast to 'primary'
        if child.domain == ["current collector"]:
            raise pybamm.DomainError(
                "Cannot do full broadcast from current collector domain"
            )
        domain = broadcast_domain
        auxiliary_domains = broadcast_auxiliary_domains or {}
        return domain, auxiliary_domains

    def _unary_new_copy(self, child):
        """ See :meth:`pybamm.UnaryOperator._unary_new_copy()`. """
        return FullBroadcast(child, self.broadcast_domain, self.auxiliary_domains)

    def _evaluate_for_shape(self):
        """
        Returns a vector of NaNs to represent the shape of a Broadcast.
        See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`
        """
        child_eval = self.children[0].evaluate_for_shape()
        vec = pybamm.evaluate_for_shape_using_domain(
            self.domain, self.auxiliary_domains
        )
        return child_eval * vec
class FullBroadcastToEdges(FullBroadcast):
    """
    A full broadcast onto the edges of a domain (edges of the primary
    dimension, nodes of the other dimensions).

    **Extends:** :class:`FullBroadcast`
    """

    def __init__(self, child, broadcast_domain, auxiliary_domains, name=None):
        if not name:
            name = "broadcast to edges"
        super().__init__(child, broadcast_domain, auxiliary_domains, name)
        # The parent initialiser sets "full to nodes"; switch to edges.
        self.broadcast_type = "full to edges"

    def evaluates_on_edges(self, dimension):
        """A broadcast to edges evaluates on edges in every dimension."""
        return True
def full_like(symbols, fill_value):
    """
    Build an array symbol whose shape, domain and auxiliary domains match the
    sum of the input symbols, with every entry equal to ``fill_value``.

    Parameters
    ----------
    symbols : iterable of :class:`Symbol`
        Symbols whose combined shape to copy
    fill_value : number
        Value to assign
    """
    # Sum all the symbols so the result's domain and auxiliary domains take
    # every input into account.
    sum_symbol = symbols[0]
    for other in symbols[1:]:
        sum_symbol = sum_symbol + other

    # A scalar-shaped result needs no array at all.
    if sum_symbol.evaluates_to_number():
        return pybamm.Scalar(fill_value)

    try:
        shape = sum_symbol.shape
        # Column vectors become pybamm.Vector; anything wider, pybamm.Matrix.
        array_type = pybamm.Vector if shape[1] == 1 else pybamm.Matrix
        if shape[1] != 1 and fill_value == 0:
            # All-zero matrices are stored sparsely to save memory.
            entries = csr_matrix(shape)
        else:
            entries = fill_value * np.ones(shape)
        return array_type(
            entries,
            domain=sum_symbol.domain,
            auxiliary_domains=sum_symbol.auxiliary_domains,
        )
    except NotImplementedError:
        # Shape is not yet known (e.g. before discretisation): fall back to a
        # symbolic full broadcast of the fill value.
        return FullBroadcast(
            fill_value, sum_symbol.domain, sum_symbol.auxiliary_domains
        )
def zeros_like(*symbols):
    """
    Returns an array with the same shape, domain and auxiliary domains as the sum of the
    input symbols, with each entry equal to zero.
    Parameters
    ----------
    symbols : :class:`Symbol`
        One or more symbols (passed variadically) whose shape to copy
    """
    # Thin wrapper: delegate to full_like with a zero fill value.
    return full_like(symbols, 0)
def ones_like(*symbols):
    """
    Returns an array with the same shape, domain and auxiliary domains as the sum of the
    input symbols, with each entry equal to one.
    Parameters
    ----------
    symbols : :class:`Symbol`
        One or more symbols (passed variadically) whose shape to copy
    """
    # Thin wrapper: delegate to full_like with a unit fill value.
    return full_like(symbols, 1)
| [
"numpy.ones",
"pybamm.evaluate_for_shape_using_domain",
"pybamm.Scalar",
"numpy.outer",
"scipy.sparse.csr_matrix",
"pybamm.DomainError"
] | [((5397, 5448), 'pybamm.evaluate_for_shape_using_domain', 'pybamm.evaluate_for_shape_using_domain', (['self.domain'], {}), '(self.domain)\n', (5435, 5448), False, 'import pybamm\n'), ((9329, 9380), 'pybamm.evaluate_for_shape_using_domain', 'pybamm.evaluate_for_shape_using_domain', (['self.domain'], {}), '(self.domain)\n', (9367, 9380), False, 'import pybamm\n'), ((11329, 11404), 'pybamm.evaluate_for_shape_using_domain', 'pybamm.evaluate_for_shape_using_domain', (['self.domain', 'self.auxiliary_domains'], {}), '(self.domain, self.auxiliary_domains)\n', (11367, 11404), False, 'import pybamm\n'), ((12610, 12635), 'pybamm.Scalar', 'pybamm.Scalar', (['fill_value'], {}), '(fill_value)\n', (12623, 12635), False, 'import pybamm\n'), ((1420, 1440), 'pybamm.Scalar', 'pybamm.Scalar', (['child'], {}), '(child)\n', (1433, 1440), False, 'import pybamm\n'), ((7703, 7837), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Secondary broadcast from particle domain must be to electrode or\n separator domains"""'], {}), '(\n """Secondary broadcast from particle domain must be to electrode or\n separator domains"""\n )\n', (7721, 7837), False, 'import pybamm\n'), ((10621, 10697), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Cannot do full broadcast from current collector domain"""'], {}), "('Cannot do full broadcast from current collector domain')\n", (10639, 10697), False, 'import pybamm\n'), ((12954, 12971), 'scipy.sparse.csr_matrix', 'csr_matrix', (['shape'], {}), '(shape)\n', (12964, 12971), False, 'from scipy.sparse import csr_matrix\n'), ((3849, 4002), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Primary broadcast from current collector domain must be to electrode\n or separator or particle domains"""'], {}), '(\n """Primary broadcast from current collector domain must be to electrode\n or separator or particle domains"""\n )\n', (3867, 4002), False, 'import pybamm\n'), ((5464, 5489), 'numpy.outer', 'np.outer', (['child_eval', 'vec'], {}), '(child_eval, vec)\n', 
(5472, 5489), True, 'import numpy as np\n'), ((8124, 8260), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Secondary broadcast from electrode or separator must be to\n current collector domains"""'], {}), '(\n """Secondary broadcast from electrode or separator must be to\n current collector domains"""\n )\n', (8142, 8260), False, 'import pybamm\n'), ((9396, 9421), 'numpy.outer', 'np.outer', (['vec', 'child_eval'], {}), '(vec, child_eval)\n', (9404, 9421), True, 'import numpy as np\n'), ((13021, 13035), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (13028, 13035), True, 'import numpy as np\n'), ((4317, 4442), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Primary broadcast from electrode or separator must be to particle\n domains"""'], {}), '(\n """Primary broadcast from electrode or separator must be to particle\n domains"""\n )\n', (4335, 4442), False, 'import pybamm\n'), ((8351, 8437), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Cannot do secondary broadcast from current collector domain"""'], {}), "(\n 'Cannot do secondary broadcast from current collector domain')\n", (8369, 8437), False, 'import pybamm\n'), ((4557, 4627), 'pybamm.DomainError', 'pybamm.DomainError', (['"""Cannot do primary broadcast from particle domain"""'], {}), "('Cannot do primary broadcast from particle domain')\n", (4575, 4627), False, 'import pybamm\n')] |
# --------------------------------------------------------
# FaceNet Datasets
# Licensed under The MIT License [see LICENSE for details]
# Copyright 2019 smarsu. All Rights Reserved.
# --------------------------------------------------------
import numpy as np
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between ``a`` and ``b``.

    Computed as ``sqrt(sum((a - b) ** 2))`` over all elements.
    """
    diff = np.asarray(a) - np.asarray(b)
    return np.sqrt((diff ** 2).sum())
| [
"numpy.square"
] | [((342, 358), 'numpy.square', 'np.square', (['(a - b)'], {}), '(a - b)\n', (351, 358), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^(-x)), applied elementwise for arrays."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def step_function(x):
    """Heaviside step function: 1 where x > 0, else 0, elementwise.

    ``np.int`` was removed in NumPy 1.24 (it was only ever an alias for the
    builtin ``int``), so the builtin is used for the dtype; the returned
    array is identical to before.
    """
    return np.array(x > 0, dtype=int)
# Plot the sigmoid and step function on the same axes for comparison.
x = np.arange(-5.0, 5.0, 0.1)
y1 = sigmoid(x)
y2 = step_function(x)
plt.plot(x, y1)
plt.plot(x, y2, 'k--')  # step function as a dashed black line
plt.ylim(-0.1, 1.1) # set the range of the y-axis drawn in the figure
plt.show()
| [
"numpy.exp",
"numpy.array",
"matplotlib.pylab.show",
"matplotlib.pylab.plot",
"matplotlib.pylab.ylim",
"numpy.arange"
] | [((202, 227), 'numpy.arange', 'np.arange', (['(-5.0)', '(5.0)', '(0.1)'], {}), '(-5.0, 5.0, 0.1)\n', (211, 227), True, 'import numpy as np\n'), ((271, 286), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y1'], {}), '(x, y1)\n', (279, 286), True, 'import matplotlib.pylab as plt\n'), ((288, 310), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y2', '"""k--"""'], {}), "(x, y2, 'k--')\n", (296, 310), True, 'import matplotlib.pylab as plt\n'), ((312, 331), 'matplotlib.pylab.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (320, 331), True, 'import matplotlib.pylab as plt\n'), ((347, 357), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (355, 357), True, 'import matplotlib.pylab as plt\n'), ((165, 194), 'numpy.array', 'np.array', (['(x > 0)'], {'dtype': 'np.int'}), '(x > 0, dtype=np.int)\n', (173, 194), True, 'import numpy as np\n'), ((110, 120), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (116, 120), True, 'import numpy as np\n')] |
import re
import string
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
def custom_activation(x):
    """Custom activation: the square of tanh(x)."""
    activated = tf.nn.tanh(x)
    return activated * activated
class CustomLayer(keras.layers.Layer):
    """Toy Keras layer that scales its inputs by a stored ``units`` value.

    In training mode ``call`` multiplies the inputs by ``units``; at
    inference it is the identity. ``units`` is wrapped in a ``tf.Variable``
    and converted back to a plain scalar in ``get_config`` so the layer can
    be serialised and re-created.
    """

    def __init__(self, units=32, **kwargs):
        super(CustomLayer, self).__init__(**kwargs)
        # Wrapping in tf.Variable makes ``units`` tracked layer state.
        self.units = tf.Variable(units, name="units")

    def call(self, inputs, training=False):
        # Scaling only applies during training; inference passes through.
        if training:
            return inputs * self.units
        else:
            return inputs

    def get_config(self):
        config = super(CustomLayer, self).get_config()
        # .numpy() turns the variable back into a serialisable scalar.
        config.update({"units": self.units.numpy()})
        return config
def KerasSequentialModel() -> keras.models.Model:
    """Build and compile a minimal one-layer Keras Sequential model.

    The single Dense layer has no bias and all-ones weights over 5 inputs,
    so the model outputs the sum of its five input features.
    """
    net = keras.models.Sequential(
        (
            keras.layers.Dense(
                units=1,
                input_shape=(5,),
                use_bias=False,
                kernel_initializer=keras.initializers.Ones(),
            ),
        )
    )

    opt = keras.optimizers.Adam(0.002, 0.5)
    net.compile(optimizer=opt, loss="binary_crossentropy", metrics=["accuracy"])
    return net
def KerasNLPModel() -> keras.models.Model:
    """Build and compile a small text-classification Keras model.

    Text preprocessing (a ``TextVectorization`` layer with custom
    standardization) is embedded in the model, so it accepts raw string
    inputs and runs embedding -> Conv1D -> dense layers to a single
    sigmoid output.
    """
    from tensorflow.keras.layers.experimental.preprocessing import TextVectorization

    def custom_standardization(input_data: str) -> tf.Tensor:
        """Lowercase the text, strip '<br />' tags and punctuation."""
        lowercase = tf.strings.lower(input_data)
        stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
        return tf.strings.regex_replace(
            stripped_html, "[%s]" % re.escape(string.punctuation), ""
        )

    max_features = 20000  # vocabulary size
    embedding_dims = 50
    vectorize_layer = TextVectorization(
        standardize=custom_standardization,
        max_tokens=max_features,
        output_mode="int",
        output_sequence_length=400,
    )

    # A text input with preprocessing layers
    text_input = keras.Input(shape=(1,), dtype=tf.string, name="text")
    x = vectorize_layer(text_input)
    x = keras.layers.Embedding(max_features + 1, embedding_dims)(x)
    x = keras.layers.Dropout(0.2)(x)
    # Conv1D + global max pooling
    x = keras.layers.Conv1D(128, 7, padding="valid", activation="relu", strides=1)(x)
    x = keras.layers.GlobalMaxPooling1D()(x)
    # We add a vanilla hidden layer:
    x = keras.layers.Dense(128, activation="relu")(x)
    x = keras.layers.Dropout(0.2)(x)
    # We project onto a single unit output layer, and squash it with a sigmoid:
    predictions = keras.layers.Dense(1, activation="sigmoid", name="predictions")(x)
    model = keras.Model(text_input, predictions)
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
class NativeModel(tf.Module):
    """Minimal ``tf.Module`` with one dense (matmul) layer of all-ones
    weights, i.e. it sums the 5 input features.

    ``np.asfarray`` was removed in NumPy 2.0; ``np.asarray`` with an
    explicit float dtype is the supported equivalent and produces the same
    float64 array.
    """

    def __init__(self):
        super().__init__()
        # 5x1 column of ones -> matmul sums the five inputs.
        self.weights = np.asarray([[1.0], [1.0], [1.0], [1.0], [1.0]], dtype=np.float64)
        self.dense = lambda inputs: tf.matmul(inputs, self.weights)

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
    )
    def __call__(self, inputs):
        return self.dense(inputs)
class NativeRaggedModel(NativeModel):
    """NativeModel variant that accepts a ragged tensor, densified before matmul."""

    @tf.function(
        input_signature=[
            tf.RaggedTensorSpec(tf.TensorShape([None, None]), tf.float64, 1, tf.int64)
        ]
    )
    def __call__(self, inputs):
        # Pad/clip the ragged rows into a dense (None, 5) tensor first.
        dense_inputs = inputs.to_tensor(shape=[None, 5], default_value=0)
        return self.dense(dense_inputs)
class MultiInputModel(tf.Module):
    """tf.Module combining two inputs as ``x1 + x2 * factor`` before a ones-weight matmul."""

    def __init__(self):
        super().__init__()
        # Fixed (5, 1) float64 column of ones.
        self.weights = np.asfarray([[1.0]] * 5)
        self.dense = lambda combined: tf.matmul(combined, self.weights)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="x1"),
            tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="x2"),
            tf.TensorSpec(shape=(), dtype=tf.float64, name="factor"),
        ]
    )
    def __call__(self, x1: tf.Tensor, x2: tf.Tensor, factor: tf.Tensor):
        return self.dense(x1 + x2 * factor)
| [
"re.escape",
"numpy.asfarray",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.initializers.Ones",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.matmul",
"tensorflow.keras.layers.Conv1D",
"tensorflow.nn.tanh",
"tensorflow.Variable",
"tensorflow.keras.layers.Dropout",
"tensorflow.T... | [((975, 1008), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.002)', '(0.5)'], {}), '(0.002, 0.5)\n', (996, 1008), True, 'import tensorflow.keras as keras\n'), ((1616, 1746), 'tensorflow.keras.layers.experimental.preprocessing.TextVectorization', 'TextVectorization', ([], {'standardize': 'custom_standardization', 'max_tokens': 'max_features', 'output_mode': '"""int"""', 'output_sequence_length': '(400)'}), "(standardize=custom_standardization, max_tokens=\n max_features, output_mode='int', output_sequence_length=400)\n", (1633, 1746), False, 'from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n'), ((1844, 1897), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1,)', 'dtype': 'tf.string', 'name': '"""text"""'}), "(shape=(1,), dtype=tf.string, name='text')\n", (1855, 1897), True, 'import tensorflow.keras as keras\n'), ((2513, 2549), 'tensorflow.keras.Model', 'keras.Model', (['text_input', 'predictions'], {}), '(text_input, predictions)\n', (2524, 2549), True, 'import tensorflow.keras as keras\n'), ((140, 153), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['x'], {}), '(x)\n', (150, 153), True, 'import tensorflow as tf\n'), ((317, 349), 'tensorflow.Variable', 'tf.Variable', (['units'], {'name': '"""units"""'}), "(units, name='units')\n", (328, 349), True, 'import tensorflow as tf\n'), ((1318, 1346), 'tensorflow.strings.lower', 'tf.strings.lower', (['input_data'], {}), '(input_data)\n', (1334, 1346), True, 'import tensorflow as tf\n'), ((1371, 1421), 'tensorflow.strings.regex_replace', 'tf.strings.regex_replace', (['lowercase', '"""<br />"""', '""" """'], {}), "(lowercase, '<br />', ' ')\n", (1395, 1421), True, 'import tensorflow as tf\n'), ((1942, 1998), 'tensorflow.keras.layers.Embedding', 'keras.layers.Embedding', (['(max_features + 1)', 'embedding_dims'], {}), '(max_features + 1, embedding_dims)\n', (1964, 1998), True, 'import tensorflow.keras as keras\n'), ((2010, 2035), 
'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2030, 2035), True, 'import tensorflow.keras as keras\n'), ((2082, 2156), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', (['(128)', '(7)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'strides': '(1)'}), "(128, 7, padding='valid', activation='relu', strides=1)\n", (2101, 2156), True, 'import tensorflow.keras as keras\n'), ((2168, 2201), 'tensorflow.keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPooling1D', ([], {}), '()\n', (2199, 2201), True, 'import tensorflow.keras as keras\n'), ((2251, 2293), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2269, 2293), True, 'import tensorflow.keras as keras\n'), ((2305, 2330), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2325, 2330), True, 'import tensorflow.keras as keras\n'), ((2433, 2496), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""predictions"""'}), "(1, activation='sigmoid', name='predictions')\n", (2451, 2496), True, 'import tensorflow.keras as keras\n'), ((2759, 2807), 'numpy.asfarray', 'np.asfarray', (['[[1.0], [1.0], [1.0], [1.0], [1.0]]'], {}), '([[1.0], [1.0], [1.0], [1.0], [1.0]])\n', (2770, 2807), True, 'import numpy as np\n'), ((3485, 3533), 'numpy.asfarray', 'np.asfarray', (['[[1.0], [1.0], [1.0], [1.0], [1.0]]'], {}), '([[1.0], [1.0], [1.0], [1.0], [1.0]])\n', (3496, 3533), True, 'import numpy as np\n'), ((2844, 2875), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.weights'], {}), '(inputs, self.weights)\n', (2853, 2875), True, 'import tensorflow as tf\n'), ((3570, 3601), 'tensorflow.matmul', 'tf.matmul', (['tensor', 'self.weights'], {}), '(tensor, self.weights)\n', (3579, 3601), True, 'import tensorflow as tf\n'), ((1499, 1528), 're.escape', 're.escape', (['string.punctuation'], {}), 
'(string.punctuation)\n', (1508, 1528), False, 'import re\n'), ((2920, 2980), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 5]', 'dtype': 'tf.float64', 'name': '"""inputs"""'}), "(shape=[1, 5], dtype=tf.float64, name='inputs')\n", (2933, 2980), True, 'import tensorflow as tf\n'), ((3659, 3715), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 5]', 'dtype': 'tf.float64', 'name': '"""x1"""'}), "(shape=[1, 5], dtype=tf.float64, name='x1')\n", (3672, 3715), True, 'import tensorflow as tf\n'), ((3729, 3785), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[1, 5]', 'dtype': 'tf.float64', 'name': '"""x2"""'}), "(shape=[1, 5], dtype=tf.float64, name='x2')\n", (3742, 3785), True, 'import tensorflow as tf\n'), ((3799, 3855), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.float64', 'name': '"""factor"""'}), "(shape=(), dtype=tf.float64, name='factor')\n", (3812, 3855), True, 'import tensorflow as tf\n'), ((907, 932), 'tensorflow.keras.initializers.Ones', 'keras.initializers.Ones', ([], {}), '()\n', (930, 932), True, 'import tensorflow.keras as keras\n'), ((3170, 3198), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None]'], {}), '([None, None])\n', (3184, 3198), True, 'import tensorflow as tf\n')] |
import numpy as np
from keras import models
import json, base64, cv2
import FLutils
from FLutils.weight_summarizer import WeightSummarizer
class Server:
    """Central federated-learning server.

    Holds the global model weights, sends them to clients, collects the
    clients' weights and metrics after each round, and aggregates them
    into new global weights via a WeightSummarizer.
    """
    def __init__(self, model_fn,
                 weight_summarizer: WeightSummarizer,
                 nb_clients: int = 100):
        # model_fn: compiled Keras model used as the template for the global model
        # weight_summarizer: aggregation strategy for client weights
        # nb_clients: number of participating clients
        self.nb_clients = nb_clients
        self.weight_summarizer = weight_summarizer
        # Initialize the global model's weights
        self.model_fn = model_fn
        # One metric-history list per compiled metric of the template model
        self.global_test_metrics_dict = {k: [] for k in model_fn.metrics_names}
        self.global_model_weights = model_fn.get_weights()
        FLutils.get_rid_of_the_models(model_fn)
        self.global_train_losses = None
        self.round_losses = []
        # Per-round buffers; cleared by init_for_new_round()
        self.client_model_weights = []
        self.client_test_accuracy = []
        self.client_test_loss = []
        self.client_test_density_distribution = []
        self.client_history = []
        # Training parameters used by the clients
        self.client_train_params_dict = {"batch_size": 32,
                                         "val_batch_size": 64,
                                         "epochs": [1,1,1],
                                         "max_label_length":32,
                                         "verbose": 1,
                                         "image_size": [32,100],
                                         "char_file": ""}
    def _create_model_with_updated_weights(self, model=None) -> models.Model:
        """Return `model` (or the template model) loaded with the global weights."""
        if model is None:
            model = self.model_fn
        model.set_weights(self.global_model_weights)
        return model
    def send_model(self, client):
        """Send the template model and current global weights to a client."""
        client.receive_and_init_model(self.model_fn, self.global_model_weights)
    def init_for_new_round(self):
        """Clear all per-round buffers before starting a new FL round."""
        # Reset the collected weights
        self.client_model_weights.clear()
        # Reset epoch losses
        self.round_losses.clear()
        self.client_test_accuracy.clear()
        self.client_test_loss.clear()
        self.client_test_density_distribution.clear()
        self.client_history.clear()
    def process_client_test_result(self, fl_strategy_metrics):
        """Convert collected client metrics into a normalized density distribution.

        fl_strategy_metrics: "acc" weights clients by accuracy (higher is
        better); any other value weights them by inverse loss (lower is
        better). The normalized weights are appended to
        self.client_test_density_distribution (sums to 1).
        """
        client_test_result = []
        if fl_strategy_metrics=="acc":
            np_client_test_accuracy = np.array(self.client_test_accuracy).transpose() # transpose data from dataset-wise to model-wise
            # First-epoch accuracy of each client's own training history
            client_history_acc = [his[fl_strategy_metrics][0] for his in self.client_history]
            for ind, result in enumerate(np_client_test_accuracy):
                temp_value = sum(result)
                client_test_result.append(temp_value*client_history_acc[ind]/sum(client_history_acc))
        else:
            np_client_test_loss = np.array(self.client_test_loss).transpose()
            client_history_loss = [1/np.mean(his[fl_strategy_metrics]) for his in self.client_history] # calculate average loss if epoch more than 1.
            for ind, result in enumerate(np_client_test_loss):
                temp_value = sum(result)
                client_test_result.append(1/temp_value * client_history_loss[ind]/sum(client_history_loss))
        # Normalize so the resulting distribution sums to 1
        for value in client_test_result:
            self.client_test_density_distribution.append(value / sum(client_test_result))
    def summarize_weights(self):
        """Aggregate collected client weights into new global weights."""
        new_weights = self.weight_summarizer.process(client_weight_list=self.client_model_weights,
                                                     density_distribution=self.client_test_density_distribution)
        self.global_model_weights = new_weights
    def test_global_model(self, testModel, test_data, char_to_id):
        """Evaluate the global model on an OCR-style test set.

        testModel: model to load the global weights into.
        test_data: iterable of JSON lines with 'label' and base64 'img' keys.
        char_to_id: character-to-index vocabulary mapping.
        Returns a dict mapping the template model's metric names to
        [0, exact-match ratio]; the exact-match ratio is the fraction of
        samples whose CTC-decoded prediction equals the label.
        """
        model = self._create_model_with_updated_weights(testModel)
        id_to_char = {v: k for k, v in char_to_id.items()}
        cur = 0
        tol = 0
        for data in test_data:
            temp = json.loads(data.strip('\r\n'))
            label = temp['label'].upper()
            ori_img = temp['img'].encode('utf-8')
            # Decode the base64-encoded image bytes into a 3-channel array
            ori_img = cv2.imdecode(np.frombuffer(base64.b64decode(ori_img), np.uint8), 1)
            if len(ori_img.shape) < 3 or ori_img.shape[2] == 1:
                # Replicate a single-channel image into 3 channels
                ori_img = cv2.merge([ori_img, ori_img, ori_img])
            # Resize to height 32 keeping the aspect ratio
            img_processed = cv2.resize(ori_img, (int(ori_img.shape[1] * (32 / ori_img.shape[0])), 32))
            # Skip samples containing out-of-vocabulary characters
            try: _ = [char_to_id[j] for j in label]
            except: continue
            # Skip samples that are too narrow, too short, or too dense
            if img_processed.shape[1] < 100: continue
            if len(label) < 3: continue
            if len(label) > img_processed.shape[1] // 4: continue
            # Normalize pixel values to [-1, 1]
            img_processed = (np.array(img_processed, 'f') - 127.5) / 127.5
            x = np.zeros((1, 32, img_processed.shape[1], 3), dtype=np.float32)
            x[0] = img_processed
            tol+=1
            pred_num = model.predict(x, verbose=0)
            pred_list = FLutils.fast_ctc_decode(pred_num, 0)
            pred_label = u''.join([id_to_char[x] for [x, _, _] in pred_list])
            if pred_label.upper() == label.upper(): cur += 1
        # First entry is a fixed 0 (presumably a loss placeholder — confirm
        # against model_fn.metrics_names), second is the exact-match ratio.
        # NOTE(review): raises ZeroDivisionError if every sample is skipped.
        results = [0, cur/tol]
        results_dict = dict(zip(self.model_fn.metrics_names, results))
        for metric_name, value in results_dict.items():
            self.global_test_metrics_dict[metric_name].append(value)
        FLutils.get_rid_of_the_models(model)
        return results_dict
    def evaluate_global_model(self, client_train_dict, test_data: np.ndarray):
        """Evaluate the global model over test_data via FLutils.generator."""
        model = self._create_model_with_updated_weights()
        data_generator = FLutils.generator(client_train_dict, test_data, "test")
        batch_size = min(client_train_dict["batch_size"], len(test_data))
        hist = model.evaluate_generator(data_generator,
                                        steps=len(test_data) // batch_size,
                                        verbose=0)
        results_dict = dict(zip(model.metrics_names, hist))
        for metric_name, value in results_dict.items():
            self.global_test_metrics_dict[metric_name].append(value)
        FLutils.get_rid_of_the_models(model)
        return results_dict
    def save_model_weights(self, path: str):
        """Write the current global weights to `path`."""
        model = self._create_model_with_updated_weights()
        model.save_weights(str(path), overwrite=True)
        FLutils.get_rid_of_the_models(model)
    def load_model_weights(self, path: str, by_name: bool = False):
        """Load weights from `path` and adopt them as the global weights."""
        model = self._create_model_with_updated_weights()
        model.load_weights(str(path), by_name=by_name)
        self.global_model_weights = model.get_weights()
        FLutils.get_rid_of_the_models(model)
| [
"FLutils.fast_ctc_decode",
"cv2.merge",
"numpy.mean",
"base64.b64decode",
"FLutils.get_rid_of_the_models",
"numpy.zeros",
"numpy.array",
"FLutils.generator"
] | [((599, 638), 'FLutils.get_rid_of_the_models', 'FLutils.get_rid_of_the_models', (['model_fn'], {}), '(model_fn)\n', (628, 638), False, 'import FLutils\n'), ((5168, 5204), 'FLutils.get_rid_of_the_models', 'FLutils.get_rid_of_the_models', (['model'], {}), '(model)\n', (5197, 5204), False, 'import FLutils\n'), ((5396, 5451), 'FLutils.generator', 'FLutils.generator', (['client_train_dict', 'test_data', '"""test"""'], {}), "(client_train_dict, test_data, 'test')\n", (5413, 5451), False, 'import FLutils\n'), ((5908, 5944), 'FLutils.get_rid_of_the_models', 'FLutils.get_rid_of_the_models', (['model'], {}), '(model)\n', (5937, 5944), False, 'import FLutils\n'), ((6139, 6175), 'FLutils.get_rid_of_the_models', 'FLutils.get_rid_of_the_models', (['model'], {}), '(model)\n', (6168, 6175), False, 'import FLutils\n'), ((6422, 6458), 'FLutils.get_rid_of_the_models', 'FLutils.get_rid_of_the_models', (['model'], {}), '(model)\n', (6451, 6458), False, 'import FLutils\n'), ((4567, 4629), 'numpy.zeros', 'np.zeros', (['(1, 32, img_processed.shape[1], 3)'], {'dtype': 'np.float32'}), '((1, 32, img_processed.shape[1], 3), dtype=np.float32)\n', (4575, 4629), True, 'import numpy as np\n'), ((4757, 4793), 'FLutils.fast_ctc_decode', 'FLutils.fast_ctc_decode', (['pred_num', '(0)'], {}), '(pred_num, 0)\n', (4780, 4793), False, 'import FLutils\n'), ((4093, 4131), 'cv2.merge', 'cv2.merge', (['[ori_img, ori_img, ori_img]'], {}), '([ori_img, ori_img, ori_img])\n', (4102, 4131), False, 'import json, base64, cv2\n'), ((2234, 2269), 'numpy.array', 'np.array', (['self.client_test_accuracy'], {}), '(self.client_test_accuracy)\n', (2242, 2269), True, 'import numpy as np\n'), ((2683, 2714), 'numpy.array', 'np.array', (['self.client_test_loss'], {}), '(self.client_test_loss)\n', (2691, 2714), True, 'import numpy as np\n'), ((2764, 2797), 'numpy.mean', 'np.mean', (['his[fl_strategy_metrics]'], {}), '(his[fl_strategy_metrics])\n', (2771, 2797), True, 'import numpy as np\n'), ((3962, 3987), 
'base64.b64decode', 'base64.b64decode', (['ori_img'], {}), '(ori_img)\n', (3978, 3987), False, 'import json, base64, cv2\n'), ((4505, 4533), 'numpy.array', 'np.array', (['img_processed', '"""f"""'], {}), "(img_processed, 'f')\n", (4513, 4533), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from numpy import real, min as np_min, max as np_max, zeros, hstack
from ....Classes.MeshMat import MeshMat
from ....Classes.MeshVTK import MeshVTK
from ....definitions import config_dict
COLOR_MAP = config_dict["PLOT"]["COLOR_DICT"]["COLOR_MAP"]
def plot_deflection(
    self,
    *args,
    label=None,
    index=None,
    indices=None,
    clim=None,
    factor=None,
    field_name=None,
    group_names=None,
    save_path=None,
    title="",
    win_title=None,
    is_surf=True,
    is_show_fig=True,
):
    """Plot the operational deflection shape using pyvista plotter.

    Parameters
    ----------
    self : MeshSolution
        a MeshSolution object
    label : str
        a label
    index : int
        an index
    indices : list
        list of the points to extract (optional)
    clim : list
        a list of 2 elements for the limits of the colorbar
    factor : float
        factor to multiply vector field
    field_name : str
        title of the field to display on plot
    group_names : list
        plot only the elements of the given group(s) (optional)
    save_path : str
        path to save a screenshot of the figure; if None an interactive
        window is opened instead
    title : str
        title displayed inside the figure
    win_title : str
        title of the plotter window (mirrored from ``title`` when empty)
    is_surf : bool
        True to extract and warp only the surface of the mesh
    is_show_fig : bool
        To call show at the end of the method
    Returns
    -------
    """
    if group_names is not None:
        # Delegate to the sub-MeshSolution restricted to the requested
        # groups (group_names=None on recursion to avoid looping).
        meshsol_grp = self.get_group(group_names)
        meshsol_grp.plot_deflection(
            *args,
            label=label,
            index=index,
            indices=indices,
            clim=clim,
            factor=factor,
            field_name=field_name,
            save_path=save_path,
            title=title,
            win_title=win_title,
            is_surf=is_surf,
            is_show_fig=is_show_fig,
            group_names=None,
        )
    else:
        # Select the plotting backend once (the original code duplicated
        # this block verbatim): Qt-based interactive plotter when available
        # and not rendering straight to a file, plain pyvista otherwise.
        if save_path is None:
            try:
                import pyvistaqt as pv

                is_pyvistaqt = True
            except:
                import pyvista as pv

                is_pyvistaqt = False
        else:
            import pyvista as pv

            is_pyvistaqt = False

        # Mirror figure/window titles when only one is given. win_title
        # defaults to None, which the original "" comparisons missed (and
        # could end up passing title=None to add_text): normalize first.
        if win_title is None:
            win_title = ""
        if title != "" and win_title == "":
            win_title = title
        elif win_title != "" and title == "":
            title = win_title

        # Get mesh and the radial scalar field to color by
        mesh_pv, field, field_name = self.get_mesh_field_pv(
            *args,
            label=label,
            index=index,
            indices=indices,
            field_name=field_name,
            is_radial=True,
        )
        mesh = MeshVTK(mesh=mesh_pv, is_pyvista_mesh=True)

        # Get the full vector field used to warp the mesh
        _, vect_field, _ = self.get_mesh_field_pv(
            *args,
            label=label,
            index=index,
            indices=indices,
            field_name=field_name,
            is_radial=False,
        )

        # Fall back on a sensible colorbar title
        if field_name is None:
            if label is not None:
                field_name = label
            elif self.get_solution(index=index).label is not None:
                field_name = self.get_solution(index=index).label
            else:
                field_name = "Field"

        # Compute colorbar boundaries
        if clim is None:
            clim = [np_min(real(field)), np_max(real(field))]
        # Symmetrize the colorbar when the field is nearly constant
        if (clim[1] - clim[0]) / clim[1] < 0.01:
            clim[0] = -abs(clim[1])
            clim[1] = abs(clim[1])

        # Compute deformation factor
        if factor is None:
            # factor = 1 / (100 * clim[1])
            factor = 1 / clim[1] * 10

        # Add third dimension if needed
        solution = self.get_solution(
            label=label,
            index=index,
        )
        if solution.dimension == 2:
            vect_field = hstack((vect_field, zeros((vect_field.shape[0], 1))))

        # Extract surface
        if is_surf:
            surf = mesh.get_surf(indices=indices)
        else:
            surf = mesh.get_mesh_pv(indices=indices)

        # Add the (scaled) deflection vectors and warp the mesh by them
        surf.vectors = real(vect_field) * factor
        surf_warp = surf.warp_by_vector()

        # Attach the radial field to color the warped surface
        surf_warp[field_name] = real(field)

        # Configure plot
        if is_pyvistaqt:
            p = pv.BackgroundPlotter()
            p.set_background("white")
        else:
            pv.set_plot_theme("document")
            p = pv.Plotter(notebook=False, title=win_title)
        sargs = dict(
            interactive=True,
            title_font_size=20,
            label_font_size=16,
            font_family="arial",
            color="black",
        )
        # Undeformed mesh shown in grey underneath
        p.add_mesh(
            mesh_pv, color="grey", opacity=1, show_edges=True, edge_color="white"
        )
        p.set_position((0.2, 0.2, 0.5))
        p.reset_camera()
        p.clear()
        p.add_mesh(
            surf_warp,
            scalars=field_name,
            opacity=1,
            show_edges=False,
            cmap=COLOR_MAP,
            clim=clim,
            scalar_bar_args=sargs,
        )
        p.add_text(title, position="upper_edge")
        p.add_axes()
        if self.dimension == 2:
            p.view_xy()
        if save_path is None and is_show_fig:
            p.show()
        elif save_path is not None:
            p.show(interactive=False, screenshot=save_path)
| [
"pyvista.set_plot_theme",
"pyvista.BackgroundPlotter",
"numpy.real",
"numpy.zeros",
"pyvista.Plotter"
] | [((4251, 4262), 'numpy.real', 'real', (['field'], {}), '(field)\n', (4255, 4262), False, 'from numpy import real, min as np_min, max as np_max, zeros, hstack\n'), ((4096, 4112), 'numpy.real', 'real', (['vect_field'], {}), '(vect_field)\n', (4100, 4112), False, 'from numpy import real, min as np_min, max as np_max, zeros, hstack\n'), ((4330, 4352), 'pyvista.BackgroundPlotter', 'pv.BackgroundPlotter', ([], {}), '()\n', (4350, 4352), True, 'import pyvista as pv\n'), ((4417, 4446), 'pyvista.set_plot_theme', 'pv.set_plot_theme', (['"""document"""'], {}), "('document')\n", (4434, 4446), True, 'import pyvista as pv\n'), ((4463, 4506), 'pyvista.Plotter', 'pv.Plotter', ([], {'notebook': '(False)', 'title': 'win_title'}), '(notebook=False, title=win_title)\n', (4473, 4506), True, 'import pyvista as pv\n'), ((3313, 3324), 'numpy.real', 'real', (['field'], {}), '(field)\n', (3317, 3324), False, 'from numpy import real, min as np_min, max as np_max, zeros, hstack\n'), ((3334, 3345), 'numpy.real', 'real', (['field'], {}), '(field)\n', (3338, 3345), False, 'from numpy import real, min as np_min, max as np_max, zeros, hstack\n'), ((3846, 3877), 'numpy.zeros', 'zeros', (['(vect_field.shape[0], 1)'], {}), '((vect_field.shape[0], 1))\n', (3851, 3877), False, 'from numpy import real, min as np_min, max as np_max, zeros, hstack\n')] |
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import scipy.ndimage
import scipy.signal
import shutil
import display_pyutils
# Load the FOCUS packageimport sys
import sys
sys.path.append('/home/allie/projects/focus') # Just to remember where this path is from!
IM_DIR = '/home/allie/workspace/images'
def apply_averaging_filter(x, filter_size=5):
    """Smooth a 1-D signal with a moving-average (boxcar) filter.

    Parameters
    ----------
    x : array-like
        1-D signal to smooth.
    filter_size : int
        Width of the averaging window (default 5).

    Returns
    -------
    np.ndarray
        'valid'-mode convolution of length ``len(x) - filter_size + 1``.
    """
    # Bug fix: the original convolved the module-level global ``signal``
    # instead of the ``x`` argument, so the parameter was ignored.
    kernel = np.ones(filter_size) / float(filter_size)
    return np.convolve(x, kernel, mode='valid')
def apply_median_filter(x, filter_size=5):
    """Apply a 1-D median filter of width ``filter_size`` to ``x``.

    Thin wrapper around ``scipy.signal.medfilt`` (zero-padded at the edges).
    """
    filtered = scipy.signal.medfilt(x, filter_size)
    return filtered
if __name__ == '__main__':
    # Iterate every synced results directory, plot each run's smoothed
    # anomaly signal plus a thresholded version, and copy out the frames
    # flagged anomalous for two videos of interest (1101 and 1108).
    results_dirs = sorted(glob.glob('/home/allie/workspace/server_sync/*/*'))
    fignum = 0
    plt.close('all')
    anomalousness_to_save = []
    pars_to_save = []
    for results_dir in reversed(results_dirs):
        fignum+=1
        print(results_dir)
        print(os.path.join(results_dir, 'anomaly_ratings.npy'))
        try:
            anomalousness = np.load(os.path.join(results_dir, 'anomaly_ratings.npy'))
            # Map ratings in [0, 1) to an unbounded odds-like score
            signal = anomalousness/(1.0-anomalousness)
            # Smooth temporally
            signal = apply_averaging_filter(signal, 100)
            pars = pickle.load(open(os.path.join(results_dir, 'pars.pickle'), 'rb'))
            feats_file = pars.paths.files.infile_features
            plt.figure(fignum)
            plt.fill_between(range(len(signal)), signal, facecolor=display_pyutils.GOOD_COLOR_CYCLE[0], alpha=1.0) # alpha=0.5
            # Cap the y-axis using robust stats of the bottom 95% of values
            signal_sorted = np.sort(signal)
            bottom_ninetyfive_percent = signal_sorted[:int(np.floor(len(signal_sorted) * 0.95))]
            y_max = np.median(bottom_ninetyfive_percent) + 3*np.std(bottom_ninetyfive_percent)
            plt.ylim([0, y_max])
            videonum_as_str = os.path.basename(feats_file)
            lambd = pars.algorithm.discriminability.lambd
            max_buffer_size = pars.algorithm.discriminability.max_buffer_size
            title = 'video: {}\nlambda: {}\nmax_buffer_size:{}'.format(videonum_as_str, lambd, max_buffer_size)
            plt.title(title)
            print('Saving figure to {}.png in workspace'.format(plt.gcf().number))
            display_pyutils.save_fig_to_workspace()
            results_figure_name = os.path.join(results_dir, 'anomaly_rating.png')
            display_pyutils.savefig(results_figure_name)
            print('Saving figure to {}'.format(results_figure_name))
            # Zero out everything below median + 2*std; keep the raw signal
            # value where it exceeds the threshold
            thresholded_anom_results = (signal > (np.median(signal) + 2 * np.std(bottom_ninetyfive_percent))).astype(float) * signal
            plt.clf()
            plt.fill_between(range(len(thresholded_anom_results)), thresholded_anom_results, facecolor=display_pyutils.GOOD_COLOR_CYCLE[1], alpha=1.0, label='anomalous: {:.4g}%'.format(100.0 * np.sum(thresholded_anom_results > 0) / len(thresholded_anom_results)))
            plt.legend()
            plt.ylim([0, y_max])
            plt.title(title)
            print('Saving figure to {}'.format(results_figure_name.replace('rating', 'rating_thresholded')))
            display_pyutils.savefig(results_figure_name.replace('rating', 'rating_thresholded'))
            # Video 1101, lambda == 10: export the frames flagged anomalous
            if videonum_as_str.find('1101') != -1 and lambd == 10:
                print('results_dir of interest: {}'.format(results_dir))
                anomalousness_to_save += [anomalousness]
                pars_to_save += [pars]
                anomalous_frames = [os.path.join('/home/allie/projects/aladdin/videos/LGW_20071101_E1_CAM1frames', 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
                destination_frames = [os.path.join('/home/allie/workspace/etc/1101_results', 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
                for src, dest in zip(anomalous_frames, destination_frames):
                    shutil.copyfile(src, dest)
            # Video 1108, lambda == 10: same export into a freshly created dir
            video_id = '1108'
            if videonum_as_str.find(video_id) != -1 and lambd == 10:
                print('results_dir of interest: {}'.format(results_dir))
                anomalousness_to_save += [anomalousness]
                pars_to_save += [pars]
                destination_dir = '/home/allie/workspace/etc/{}_results'.format(video_id)
                os.mkdir(destination_dir)
                anomalous_frames = [os.path.join('/home/allie/projects/aladdin/videos/LGW_2007{}_E1_CAM1frames'.format(video_id), 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
                destination_frames = [os.path.join(destination_dir, 'image-%06d' % frame_num + '.png') for frame_num in np.where(thresholded_anom_results > 0)[0]]
                for src, dest in zip(anomalous_frames, destination_frames):
                    shutil.copyfile(src, dest)
        except Exception as exc:
            # Best-effort per-directory processing: log and move on
            print(exc)
            print('continuing...')
| [
"display_pyutils.savefig",
"sys.path.append",
"numpy.where",
"numpy.sort",
"matplotlib.pyplot.close",
"os.mkdir",
"matplotlib.pyplot.ylim",
"glob.glob",
"numpy.ones",
"matplotlib.pyplot.gcf",
"shutil.copyfile",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"display_... | [((212, 257), 'sys.path.append', 'sys.path.append', (['"""/home/allie/projects/focus"""'], {}), "('/home/allie/projects/focus')\n", (227, 257), False, 'import sys\n'), ((700, 716), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (709, 716), True, 'import matplotlib.pyplot as plt\n'), ((629, 679), 'glob.glob', 'glob.glob', (['"""/home/allie/workspace/server_sync/*/*"""'], {}), "('/home/allie/workspace/server_sync/*/*')\n", (638, 679), False, 'import glob\n'), ((423, 443), 'numpy.ones', 'np.ones', (['filter_size'], {}), '(filter_size)\n', (430, 443), True, 'import numpy as np\n'), ((876, 924), 'os.path.join', 'os.path.join', (['results_dir', '"""anomaly_ratings.npy"""'], {}), "(results_dir, 'anomaly_ratings.npy')\n", (888, 924), False, 'import os\n'), ((1324, 1342), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (1334, 1342), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1513), 'numpy.sort', 'np.sort', (['signal'], {}), '(signal)\n', (1505, 1513), True, 'import numpy as np\n'), ((1718, 1738), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, y_max]'], {}), '([0, y_max])\n', (1726, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1797), 'os.path.basename', 'os.path.basename', (['feats_file'], {}), '(feats_file)\n', (1785, 1797), False, 'import os\n'), ((2058, 2074), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2067, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2170, 2209), 'display_pyutils.save_fig_to_workspace', 'display_pyutils.save_fig_to_workspace', ([], {}), '()\n', (2207, 2209), False, 'import display_pyutils\n'), ((2244, 2291), 'os.path.join', 'os.path.join', (['results_dir', '"""anomaly_rating.png"""'], {}), "(results_dir, 'anomaly_rating.png')\n", (2256, 2291), False, 'import os\n'), ((2304, 2348), 'display_pyutils.savefig', 'display_pyutils.savefig', (['results_figure_name'], {}), '(results_figure_name)\n', (2327, 2348), False, 'import 
display_pyutils\n'), ((2564, 2573), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2571, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2862), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2860, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2895), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, y_max]'], {}), '([0, y_max])\n', (2883, 2895), True, 'import matplotlib.pyplot as plt\n'), ((2908, 2924), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2917, 2924), True, 'import matplotlib.pyplot as plt\n'), ((975, 1023), 'os.path.join', 'os.path.join', (['results_dir', '"""anomaly_ratings.npy"""'], {}), "(results_dir, 'anomaly_ratings.npy')\n", (987, 1023), False, 'import os\n'), ((1631, 1667), 'numpy.median', 'np.median', (['bottom_ninetyfive_percent'], {}), '(bottom_ninetyfive_percent)\n', (1640, 1667), True, 'import numpy as np\n'), ((4279, 4304), 'os.mkdir', 'os.mkdir', (['destination_dir'], {}), '(destination_dir)\n', (4287, 4304), False, 'import os\n'), ((1205, 1245), 'os.path.join', 'os.path.join', (['results_dir', '"""pars.pickle"""'], {}), "(results_dir, 'pars.pickle')\n", (1217, 1245), False, 'import os\n'), ((1672, 1705), 'numpy.std', 'np.std', (['bottom_ninetyfive_percent'], {}), '(bottom_ninetyfive_percent)\n', (1678, 1705), True, 'import numpy as np\n'), ((3403, 3520), 'os.path.join', 'os.path.join', (['"""/home/allie/projects/aladdin/videos/LGW_20071101_E1_CAM1frames"""', "('image-%06d' % frame_num + '.png')"], {}), "('/home/allie/projects/aladdin/videos/LGW_20071101_E1_CAM1frames',\n 'image-%06d' % frame_num + '.png')\n", (3415, 3520), False, 'import os\n'), ((3615, 3708), 'os.path.join', 'os.path.join', (['"""/home/allie/workspace/etc/1101_results"""', "('image-%06d' % frame_num + '.png')"], {}), "('/home/allie/workspace/etc/1101_results', 'image-%06d' %\n frame_num + '.png')\n", (3627, 3708), False, 'import os\n'), ((3861, 3887), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dest'], 
{}), '(src, dest)\n', (3876, 3887), False, 'import shutil\n'), ((4568, 4632), 'os.path.join', 'os.path.join', (['destination_dir', "('image-%06d' % frame_num + '.png')"], {}), "(destination_dir, 'image-%06d' % frame_num + '.png')\n", (4580, 4632), False, 'import os\n'), ((4789, 4815), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dest'], {}), '(src, dest)\n', (4804, 4815), False, 'import shutil\n'), ((2139, 2148), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2146, 2148), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3572), 'numpy.where', 'np.where', (['(thresholded_anom_results > 0)'], {}), '(thresholded_anom_results > 0)\n', (3542, 3572), True, 'import numpy as np\n'), ((3722, 3760), 'numpy.where', 'np.where', (['(thresholded_anom_results > 0)'], {}), '(thresholded_anom_results > 0)\n', (3730, 3760), True, 'import numpy as np\n'), ((4487, 4525), 'numpy.where', 'np.where', (['(thresholded_anom_results > 0)'], {}), '(thresholded_anom_results > 0)\n', (4495, 4525), True, 'import numpy as np\n'), ((4650, 4688), 'numpy.where', 'np.where', (['(thresholded_anom_results > 0)'], {}), '(thresholded_anom_results > 0)\n', (4658, 4688), True, 'import numpy as np\n'), ((2468, 2485), 'numpy.median', 'np.median', (['signal'], {}), '(signal)\n', (2477, 2485), True, 'import numpy as np\n'), ((2767, 2803), 'numpy.sum', 'np.sum', (['(thresholded_anom_results > 0)'], {}), '(thresholded_anom_results > 0)\n', (2773, 2803), True, 'import numpy as np\n'), ((2492, 2525), 'numpy.std', 'np.std', (['bottom_ninetyfive_percent'], {}), '(bottom_ninetyfive_percent)\n', (2498, 2525), True, 'import numpy as np\n')] |
import os
import logging
from torch.utils import data
import numpy as np
import yaml
from src.common import decide_total_volume_range, update_reso
import ipdb
st = ipdb.set_trace
logger = logging.getLogger(__name__)
# Fields
class Field(object):
    """Abstract base class for data fields.

    Subclasses must implement :meth:`load` and :meth:`check_complete`.
    """

    def load(self, data_path, idx, category):
        """Load a single data point.

        Args:
            data_path (str): path to data file
            idx (int): index of data point
            category (int): index of category
        """
        raise NotImplementedError

    def check_complete(self, files):
        """Check whether the given set of files is complete.

        Args:
            files: files
        """
        raise NotImplementedError
class Shapes3dDataset(data.Dataset):
''' 3D Shapes dataset class.
'''
def __init__(self, dataset_folder, fields, split=None,
categories=None, no_except=True, transform=None, cfg=None):
''' Initialization of the the 3D shape dataset.
Args:
dataset_folder (str): dataset folder
fields (dict): dictionary of fields
split (str): which split is used
categories (list): list of categories to use
no_except (bool): no exception
transform (callable): transformation applied to data points
cfg (yaml): config file
'''
# Attributes
self.dataset_folder = dataset_folder
self.fields = fields
self.no_except = no_except
self.transform = transform
self.cfg = cfg
# If categories is None, use all subfolders
if categories is None:
categories = os.listdir(dataset_folder)
categories = [c for c in categories
if os.path.isdir(os.path.join(dataset_folder, c))]
# Read metadata file
metadata_file = os.path.join(dataset_folder, 'metadata.yaml')
if os.path.exists(metadata_file):
with open(metadata_file, 'r') as f:
self.metadata = yaml.load(f)
else:
self.metadata = {
c: {'id': c, 'name': 'n/a'} for c in categories
}
# Set index
for c_idx, c in enumerate(categories):
self.metadata[c]['idx'] = c_idx
# Get all models
self.models = []
for c_idx, c in enumerate(categories):
subpath = os.path.join(dataset_folder, c)
if not os.path.isdir(subpath):
logger.warning('Category %s does not exist in dataset.' % c)
if split is None:
self.models += [
{'category': c, 'model': m} for m in [d for d in os.listdir(subpath) if (os.path.isdir(os.path.join(subpath, d)) and d != '') ]
]
else:
split_file = os.path.join(subpath, split + '.lst')
with open(split_file, 'r') as f:
models_c = f.read().split('\n')
if '' in models_c:
models_c.remove('')
self.models += [
{'category': c, 'model': m}
for m in models_c
]
# precompute
if self.cfg['data']['input_type'] == 'pointcloud_crop':
self.split = split
# proper resolution for feature plane/volume of the ENTIRE scene
query_vol_metric = self.cfg['data']['padding'] + 1
unit_size = self.cfg['data']['unit_size']
recep_field = 2**(cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] + 2)
if 'unet' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet_kwargs']['depth']
elif 'unet3d' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels']
self.depth = depth
#! for sliding-window case, pass all points!
if self.cfg['generation']['sliding_window']:
self.total_input_vol, self.total_query_vol, self.total_reso = \
decide_total_volume_range(100000, recep_field, unit_size, depth) # contain the whole scene
else:
self.total_input_vol, self.total_query_vol, self.total_reso = \
decide_total_volume_range(query_vol_metric, recep_field, unit_size, depth)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
    def __getitem__(self, idx):
        ''' Returns an item of the dataset.

        Args:
            idx (int): ID of data point

        Returns:
            dict or None: mapping from field name (optionally suffixed with a
            sub-key as "field.key") to loaded data, or ``None`` when a field
            fails to load and ``self.no_except`` is set.
        '''
        # Pick a random camera view; fields may warp their data toward it.
        camera_view = np.random.randint(self.cfg['data']['n_views']) # camera view to which to warp
        category = self.models[idx]['category']
        model = self.models[idx]['model']
        c_idx = self.metadata[category]['idx']
        model_path = os.path.join(self.dataset_folder, category, model)
        data = {}
        # For crop-based input, `info` carries the sampled crop volume;
        # otherwise it is just the category index.
        if self.cfg['data']['input_type'] == 'pointcloud_crop':
            info = self.get_vol_info(model_path)
            data['pointcloud_crop'] = True
        else:
            info = c_idx
        for field_name, field in self.fields.items():
            try:
                field_data = field.load(model_path, idx, info, camera_view)
            except Exception:
                if self.no_except:
                    # Best-effort mode: log and drop the sample instead of
                    # aborting the whole epoch.
                    logger.warn(
                        'Error occured when loading field %s of model %s'
                        % (field_name, model)
                    )
                    return None
                else:
                    raise
            if isinstance(field_data, dict):
                # A dict result is flattened: the None key becomes the field
                # itself, other keys become "field_name.key" entries.
                for k, v in field_data.items():
                    if k is None:
                        data[field_name] = v
                    else:
                        data['%s.%s' % (field_name, k)] = v
            else:
                data[field_name] = field_data
        if self.transform is not None:
            data = self.transform(data)
        return data
def get_vol_info(self, model_path):
''' Get crop information
Args:
model_path (str): path to the current data
'''
query_vol_size = self.cfg['data']['query_vol_size']
unit_size = self.cfg['data']['unit_size']
field_name = self.cfg['data']['pointcloud_file']
plane_type = self.cfg['model']['encoder_kwargs']['plane_type']
recep_field = 2**(self.cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels'] + 2)
if self.cfg['data']['multi_files'] is None:
file_path = os.path.join(model_path, field_name)
else:
num = np.random.randint(self.cfg['data']['multi_files'])
file_path = os.path.join(model_path, field_name, '%s_%02d.npz' % (field_name, num))
points_dict = np.load(file_path)
p = points_dict['points']
if self.split == 'train':
# randomly sample a point as the center of input/query volume
p_c = [np.random.uniform(p[:,i].min(), p[:,i].max()) for i in range(3)]
# p_c = [np.random.uniform(-0.55, 0.55) for i in range(3)]
p_c = np.array(p_c).astype(np.float32)
reso = query_vol_size + recep_field - 1
# make sure the defined reso can be properly processed by UNet
reso = update_reso(reso, self.depth)
input_vol_metric = reso * unit_size
query_vol_metric = query_vol_size * unit_size
# bound for the volumes
lb_input_vol, ub_input_vol = p_c - input_vol_metric/2, p_c + input_vol_metric/2
lb_query_vol, ub_query_vol = p_c - query_vol_metric/2, p_c + query_vol_metric/2
input_vol = [lb_input_vol, ub_input_vol]
query_vol = [lb_query_vol, ub_query_vol]
else:
reso = self.total_reso
input_vol = self.total_input_vol
query_vol = self.total_query_vol
vol_info = {'plane_type': plane_type,
'reso' : reso,
'input_vol' : input_vol,
'query_vol' : query_vol}
return vol_info
def get_model_dict(self, idx):
return self.models[idx]
def test_model_complete(self, category, model):
''' Tests if model is complete.
Args:
model (str): modelname
'''
model_path = os.path.join(self.dataset_folder, category, model)
files = os.listdir(model_path)
for field_name, field in self.fields.items():
if not field.check_complete(files):
logger.warn('Field "%s" is incomplete: %s'
% (field_name, model_path))
return False
return True
def collate_remove_none(batch):
    ''' Collater that puts each data field into a tensor with outer dimension
    batch size, after discarding ``None`` samples.

    Args:
        batch: list of samples, possibly containing ``None`` placeholders
    '''
    kept = [sample for sample in batch if sample is not None]
    return data.dataloader.default_collate(kept)
def worker_init_fn(worker_id):
    ''' Worker init function to ensure true randomness.

    Seeds numpy's RNG from ``os.urandom`` plus the worker id so each
    DataLoader worker draws an independent random stream.
    '''
    # NOTE(review): set_num_threads is defined but never invoked here, so the
    # thread-limiting environment variables below are not actually applied —
    # confirm whether calling it was intended.
    def set_num_threads(nt):
        try:
            # mkl is optional; the bare except is a deliberate best-effort skip.
            import mkl; mkl.set_num_threads(nt)
        except:
            pass
        torch.set_num_threads(1)
        os.environ['IPC_ENABLE']='1'
        for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
            os.environ[o] = str(nt)
    # 4 random bytes -> up to 32-bit base seed, offset per worker.
    # NOTE(review): base_seed + worker_id can exceed 2**32 - 1 and make
    # np.random.seed raise ValueError — confirm this edge case is acceptable.
    random_data = os.urandom(4)
    base_seed = int.from_bytes(random_data, byteorder="big")
    np.random.seed(base_seed + worker_id)
| [
"logging.getLogger",
"torch.utils.data.dataloader.default_collate",
"os.path.exists",
"os.listdir",
"mkl.set_num_threads",
"os.urandom",
"os.path.join",
"yaml.load",
"numpy.array",
"numpy.random.randint",
"os.path.isdir",
"numpy.random.seed",
"numpy.load",
"src.common.decide_total_volume_r... | [((190, 217), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (207, 217), False, 'import logging\n'), ((9164, 9202), 'torch.utils.data.dataloader.default_collate', 'data.dataloader.default_collate', (['batch'], {}), '(batch)\n', (9195, 9202), False, 'from torch.utils import data\n'), ((9667, 9680), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (9677, 9680), False, 'import os\n'), ((9746, 9783), 'numpy.random.seed', 'np.random.seed', (['(base_seed + worker_id)'], {}), '(base_seed + worker_id)\n', (9760, 9783), True, 'import numpy as np\n'), ((1877, 1922), 'os.path.join', 'os.path.join', (['dataset_folder', '"""metadata.yaml"""'], {}), "(dataset_folder, 'metadata.yaml')\n", (1889, 1922), False, 'import os\n'), ((1935, 1964), 'os.path.exists', 'os.path.exists', (['metadata_file'], {}), '(metadata_file)\n', (1949, 1964), False, 'import os\n'), ((4757, 4803), 'numpy.random.randint', 'np.random.randint', (["self.cfg['data']['n_views']"], {}), "(self.cfg['data']['n_views'])\n", (4774, 4803), True, 'import numpy as np\n'), ((5003, 5053), 'os.path.join', 'os.path.join', (['self.dataset_folder', 'category', 'model'], {}), '(self.dataset_folder, category, model)\n', (5015, 5053), False, 'import os\n'), ((6988, 7006), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (6995, 7006), True, 'import numpy as np\n'), ((8566, 8616), 'os.path.join', 'os.path.join', (['self.dataset_folder', 'category', 'model'], {}), '(self.dataset_folder, category, model)\n', (8578, 8616), False, 'import os\n'), ((8633, 8655), 'os.listdir', 'os.listdir', (['model_path'], {}), '(model_path)\n', (8643, 8655), False, 'import os\n'), ((1671, 1697), 'os.listdir', 'os.listdir', (['dataset_folder'], {}), '(dataset_folder)\n', (1681, 1697), False, 'import os\n'), ((2422, 2453), 'os.path.join', 'os.path.join', (['dataset_folder', 'c'], {}), '(dataset_folder, c)\n', (2434, 2453), False, 'import os\n'), ((6741, 6777), 
'os.path.join', 'os.path.join', (['model_path', 'field_name'], {}), '(model_path, field_name)\n', (6753, 6777), False, 'import os\n'), ((6810, 6860), 'numpy.random.randint', 'np.random.randint', (["self.cfg['data']['multi_files']"], {}), "(self.cfg['data']['multi_files'])\n", (6827, 6860), True, 'import numpy as np\n'), ((6885, 6956), 'os.path.join', 'os.path.join', (['model_path', 'field_name', "('%s_%02d.npz' % (field_name, num))"], {}), "(model_path, field_name, '%s_%02d.npz' % (field_name, num))\n", (6897, 6956), False, 'import os\n'), ((7514, 7543), 'src.common.update_reso', 'update_reso', (['reso', 'self.depth'], {}), '(reso, self.depth)\n', (7525, 7543), False, 'from src.common import decide_total_volume_range, update_reso\n'), ((9367, 9390), 'mkl.set_num_threads', 'mkl.set_num_threads', (['nt'], {}), '(nt)\n', (9386, 9390), False, 'import mkl\n'), ((2046, 2058), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (2055, 2058), False, 'import yaml\n'), ((2473, 2495), 'os.path.isdir', 'os.path.isdir', (['subpath'], {}), '(subpath)\n', (2486, 2495), False, 'import os\n'), ((2852, 2889), 'os.path.join', 'os.path.join', (['subpath', "(split + '.lst')"], {}), "(subpath, split + '.lst')\n", (2864, 2889), False, 'import os\n'), ((4179, 4243), 'src.common.decide_total_volume_range', 'decide_total_volume_range', (['(100000)', 'recep_field', 'unit_size', 'depth'], {}), '(100000, recep_field, unit_size, depth)\n', (4204, 4243), False, 'from src.common import decide_total_volume_range, update_reso\n'), ((4388, 4462), 'src.common.decide_total_volume_range', 'decide_total_volume_range', (['query_vol_metric', 'recep_field', 'unit_size', 'depth'], {}), '(query_vol_metric, recep_field, unit_size, depth)\n', (4413, 4462), False, 'from src.common import decide_total_volume_range, update_reso\n'), ((7322, 7335), 'numpy.array', 'np.array', (['p_c'], {}), '(p_c)\n', (7330, 7335), True, 'import numpy as np\n'), ((1789, 1820), 'os.path.join', 'os.path.join', (['dataset_folder', 'c'], 
{}), '(dataset_folder, c)\n', (1801, 1820), False, 'import os\n'), ((2707, 2726), 'os.listdir', 'os.listdir', (['subpath'], {}), '(subpath)\n', (2717, 2726), False, 'import os\n'), ((2745, 2769), 'os.path.join', 'os.path.join', (['subpath', 'd'], {}), '(subpath, d)\n', (2757, 2769), False, 'import os\n')] |
from argparse import ArgumentParser
import h5py
import multiprocessing
import numpy as np
from pathlib import Path
import pickle
import re
import tempfile
import time
import torch
import torch.utils.tensorboard
from types import SimpleNamespace
from utils.data import central_shift, EventCrop, ImageCrop
from utils.dataset import read_info
from utils.model import filter_kwargs, import_module
from utils.options import add_test_arguments, validate_test_args
from utils.options import options2model_kwargs
from utils.serializer import Serializer
from utils.testing import evaluate, read_config, ravel_config
def parse_args():
    """Parse and validate command-line arguments for the test script."""
    parser = ArgumentParser()
    parsed = add_test_arguments(parser).parse_args()
    return validate_test_args(parsed)
def get_output_path(args):
    """Build the pickle output path for the evaluated checkpoint.

    When ``args.model`` already points at a ``.pt`` file it is used directly;
    otherwise the checkpoint path for ``args.step`` is resolved through the
    Serializer.
    """
    if args.model.suffix == '.pt':
        checkpoint = args.model
    else:
        checkpoint = Serializer(args.model)._id2path(args.step)
    return args.output / (checkpoint.stem + '.pkl')
def preprocess_args(args):
    """Materialize the selected checkpoint step into a temporary ``.pt`` file.

    Rewrites ``args.output`` and ``args.model`` in place and marks the model
    file as temporary so the caller knows to unlink it afterwards.
    """
    args.output = get_output_path(args)
    args.is_temporary_model = True
    tmp = tempfile.NamedTemporaryFile(suffix='.pt', delete=False)
    Serializer(args.model).finalize(args.step,
                                    tmp.name,
                                    map_location=args.device)
    args.model = Path(tmp.name)
    tmp.close()
    return args
def init_model(args, test_shape):
    """Instantiate the optical-flow model described by ``args``.

    Loads the flownet package dynamically, filters the CLI options down to the
    constructor's keyword arguments, and optionally restores model weights.
    """
    module = import_module(f'{args.flownet_path}.__init__',
                           args.flownet_path/'__init__.py')
    kwargs = filter_kwargs(module.OpticalFlow, options2model_kwargs(args))
    kwargs['device'] = args.device
    if args.model is None:
        return module.OpticalFlow(test_shape, **kwargs)
    return module.OpticalFlow(test_shape, model=args.model, **kwargs)
def load_events(path):
    """Read the event stream and image timestamps from an MVSEC hdf5 file.

    Returns:
        tuple: (events transposed to column-major layout, image timestamps),
        both as float64 numpy arrays.
    """
    with h5py.File(str(path), 'r') as data:
        left = data['davis']['left']
        events = np.array(left['events'], dtype=np.float64).T
        image_ts = np.array(left['image_raw_ts'], dtype=np.float64)
    return events, image_ts
def load_gt(path):
    """Load all ground-truth flow arrays from an ``.npz`` file into a dict."""
    archive = np.load(str(path))
    return {key: archive[key] for key in archive.keys()}
def get_preprocessing_functions(imshape, test_shape, crop_type):
    """Return (event, image) preprocessing callables for the chosen crop.

    Raises:
        ValueError: if ``crop_type`` is not supported.
    """
    if crop_type != 'central':
        raise ValueError(f'Unknown crop type "{crop_type}"')
    box = list(central_shift(imshape, test_shape)) + test_shape
    return EventCrop(box), ImageCrop(box)
def postprocess_config(config, dataset):
    """Resolve relative start/stop times against the dataset timeline.

    ``config.start``/``config.stop`` are offsets from the sequence start;
    ``None`` means "from the beginning" / "to the latest covered timestamp".
    """
    first = dataset.first_ts
    config.start = first if config.start is None else config.start + first
    if config.stop is None:
        # Stay within both the event stream and the ground-truth coverage.
        config.stop = min(dataset.events[2][-1], dataset.gt['timestamps'][-2])
    else:
        config.stop += first
    return config
def generate_frames(cfg, image_ts):
    """Pair image timestamps (begin, end) that lie ``cfg.step`` frames apart,
    restricted to the [cfg.start, cfg.stop) window."""
    begin, end = np.searchsorted(image_ts, [cfg.start, cfg.stop])
    starts = image_ts[begin:end - cfg.step]
    stops = image_ts[begin + cfg.step:end]
    return list(zip(starts, stops))
def seq2paths(dataset_path, seq_name):
    """Map a sequence name to its (data hdf5, ground-truth npz) file paths."""
    # The sequence type is the name with its trailing run number stripped.
    seq_type = re.sub(r'\d+$', '', seq_name)
    data_file = dataset_path / seq_type / (seq_name + '_data.hdf5')
    gt_file = dataset_path / 'FlowGT' / seq_type / (seq_name + '_gt_flow_dist.npz')
    return data_file, gt_file
def perform_single_test(args, cfg, dataset):
    """Evaluate one configuration on one dataset sequence.

    Returns:
        The (mean AEE, mean percentage AEE) pair produced by ``evaluate``.
    """
    cfg = postprocess_config(cfg, dataset)
    dataset.is_car = cfg.is_car
    # Generate the (begin, end) timestamp pairs to evaluate on.
    dataset.frames = generate_frames(cfg, dataset.image_ts)
    # Prepare event/ground-truth preprocessors for the chosen crop.
    event_preproc_fun, gt_proc_fun = get_preprocessing_functions(
        dataset.imshape, cfg.test_shape, cfg.crop_type)
    # Build the optical-flow predictor.
    flow_model = init_model(args, cfg.test_shape)
    return evaluate(flow_model,
                    dataset.events,
                    dataset.frames,
                    dataset.gt,
                    is_car=dataset.is_car,
                    event_preproc_fun=event_preproc_fun,
                    pred_postproc_fun=None,
                    gt_proc_fun=gt_proc_fun,
                    log=False)
def process_single(args):
    """Evaluate one checkpoint over every configured dataset/sequence/config.

    Results are pickled to ``args.output``; if that file already exists the
    evaluation is skipped. The temporary model file created by
    ``preprocess_args`` is always unlinked before returning.
    """
    args = preprocess_args(args)
    if args.output.is_file():
        # Already evaluated: clean up the temp model and bail out.
        if args.is_temporary_model:
            args.model.unlink()
        return
    script_dir = Path(__file__).resolve().parent
    data_dir = (script_dir/'..'/'data'/'raw').resolve()
    info_dir = script_dir/'data'/'info'
    config = read_config(script_dir/'config'/'testing.yml')
    results = []
    for ds_name, ds_config in config.items(): # process all datasets
        # dir with dataset data
        ds_dir = data_dir/ds_name
        # load info
        info_file = info_dir/(ds_name + '.hdf5')
        ds_info = read_info(str(info_file))
        for seq_name, seq_config in ds_config.items(): # process all sequences
            seq_file, gt_file = seq2paths(ds_dir, seq_name)
            dataset = SimpleNamespace(name=seq_name)
            dataset.events, dataset.image_ts = load_events(seq_file)
            dataset.gt = load_gt(gt_file)
            # Image shape comes from the ground-truth flow maps (T, H, W).
            dataset.imshape = dataset.gt['x_flow_dist'].shape[1:]
            # first timestamp for the sequence
            dataset.first_ts = ds_info[seq_name]
            # Each sequence may expand into several test configurations.
            for cfg in ravel_config(seq_config):
                cfg.dataset = ds_name
                cfg.sequence = seq_name
                cfg.mAEE, cfg.mpAEE = perform_single_test(args, cfg, dataset)
                results.append(cfg)
                print(f'[{cfg.sequence}, {cfg.start}, {cfg.stop}, {cfg.step}, '
                      f'{cfg.test_shape}, {cfg.crop_type}, {cfg.is_car}]: '
                      f'Mean AEE: {cfg.mAEE:.6f}, '
                      f'mean %AEE: {cfg.mpAEE*100:.6f}')
    with args.output.open('wb') as f:
        pickle.dump(results, f)
    if args.is_temporary_model:
        args.model.unlink()
def get_samples_passed(args):
    """Return how many training samples the checkpoint at ``args.step`` saw.

    Older checkpoints lack the ``samples_passed`` key; for those the value is
    reconstructed as ``global_step * batch_size``.
    """
    serializer = Serializer(args.model)
    model_path = serializer._id2path(args.step)
    data = torch.load(model_path, map_location='cpu')
    # Fix: dict.get evaluated the fallback eagerly, so a checkpoint that has
    # 'samples_passed' but no 'global_step' raised KeyError. Only compute the
    # fallback when it is actually needed.
    if 'samples_passed' in data:
        return data['samples_passed']
    return data['global_step'] * args.bs
class GPUPool:
    """Dispatcher that keeps at most ``tests_per_gpu`` concurrent jobs per GPU."""

    def __init__(self, pool, gpus, tests_per_gpu, timeout=1):
        """
        Args:
            pool: multiprocessing pool used to run jobs asynchronously
            gpus: list of device identifiers to schedule over
            tests_per_gpu: maximum number of concurrent jobs per device
            timeout: polling interval in seconds while waiting for results
        """
        self._pool = pool
        self._gpus = gpus
        self._tests_per_gpu = tests_per_gpu
        self._timeout = timeout

    def _wait(self, results, decrease=False):
        """Poll async results, dropping finished ones from ``results``.

        With ``decrease=True`` the call blocks (sleeping ``self._timeout``
        between polls) until at least one job finishes; otherwise it performs
        a single cleanup pass.
        """
        is_continue = True
        while is_continue:
            is_continue = decrease
            for device, device_results in results.items():
                pending = []
                for r in device_results:
                    if r.ready():
                        is_continue = False
                    else:
                        pending.append(r)
                results[device] = pending
            if is_continue:
                time.sleep(self._timeout)
        return results

    def _best_device(self, results):
        """Return the device with the fewest outstanding jobs."""
        # Fix: dict.keys() is not subscriptable in Python 3 — the original
        # `results.keys()[0]` raised TypeError. Pick an arbitrary first key.
        best_device = next(iter(results))
        for device in results:
            if len(results[device]) < len(results[best_device]):
                best_device = device
        return best_device

    def __call__(self, func, args_list):
        """Run ``func`` over ``args_list``, assigning each job a GPU.

        Each ``args`` object gets its ``device`` attribute set to the chosen
        GPU before submission; the call blocks until all jobs complete.
        """
        results = {device: [] for device in self._gpus}
        for args in args_list:
            decrease = False
            while True:
                results = self._wait(results, decrease=decrease)
                best_device = self._best_device(results)
                if len(results[best_device]) >= self._tests_per_gpu:
                    decrease = True
                else:
                    break
            args.device = best_device
            # Fix: apply_async expects an argument *tuple*; the original
            # passed the namespace itself, which the pool cannot unpack.
            results[best_device].append(self._pool.apply_async(func, (args,)))
        # Drain: wait for every remaining job to finish.
        for _, device_results in results.items():
            for r in device_results:
                r.wait()
def process_all(args):
    """Evaluate every known checkpoint step and log results to TensorBoard.

    Fans out ``process_single`` over all serialized steps via GPUPool, then
    reads each step's pickled results and writes mean AEE / %AEE scalars.
    """
    # Drop any explicit step; one namespace per known step is created below.
    args.__dict__.pop('step', None)
    serializer = Serializer(args.model)
    all_args = [SimpleNamespace(step=s, **args.__dict__)
                for s in serializer.list_known_steps()]
    # NOTE(review): the pool is sized tests_per_gpu, not
    # len(gpus) * tests_per_gpu — confirm this is the intended concurrency cap.
    with multiprocessing.Pool(args.tests_per_gpu) as p:
        GPUPool(p, args.gpus, args.tests_per_gpu)(process_single, all_args)
    writer = torch.utils.tensorboard.SummaryWriter(args.output/'log')
    for step_args in all_args:
        samples_passed = get_samples_passed(step_args)
        with get_output_path(step_args).open('rb') as f:
            results = pickle.load(f)
        for result in results:
            # One scalar series per (dataset, sequence, step, window).
            tag = f'{result.dataset}/{result.sequence}/{result.step}/' \
                  f'{result.start}/{result.stop}'
            writer.add_scalar(f'Test/mean AEE/{tag}',
                              result.mAEE,
                              samples_passed)
            writer.add_scalar(f'Test/mean %AEE/{tag}',
                              result.mpAEE * 100,
                              samples_passed)
def main():
    """Entry point: evaluate a single checkpoint step, or sweep all of them."""
    args = parse_args()
    handler = process_all if args.step is None else process_single
    handler(args)
# Run the evaluation script only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"utils.model.import_module",
"time.sleep",
"numpy.array",
"utils.model.filter_kwargs",
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.searchsorted",
"utils.serializer.Serializer",
"utils.options.options2model_kwargs",
"tempfile.NamedTemporaryFile",
"... | [((641, 657), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (655, 657), False, 'from argparse import ArgumentParser\n'), ((720, 744), 'utils.options.validate_test_args', 'validate_test_args', (['args'], {}), '(args)\n', (738, 744), False, 'from utils.options import add_test_arguments, validate_test_args\n'), ((1125, 1180), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pt"""', 'delete': '(False)'}), "(suffix='.pt', delete=False)\n", (1152, 1180), False, 'import tempfile\n'), ((1351, 1363), 'pathlib.Path', 'Path', (['f.name'], {}), '(f.name)\n', (1355, 1363), False, 'from pathlib import Path\n'), ((1443, 1528), 'utils.model.import_module', 'import_module', (['f"""{args.flownet_path}.__init__"""', "(args.flownet_path / '__init__.py')"], {}), "(f'{args.flownet_path}.__init__', args.flownet_path /\n '__init__.py')\n", (1456, 1528), False, 'from utils.model import filter_kwargs, import_module\n'), ((1569, 1595), 'utils.options.options2model_kwargs', 'options2model_kwargs', (['args'], {}), '(args)\n', (1589, 1595), False, 'from utils.options import options2model_kwargs\n'), ((1615, 1662), 'utils.model.filter_kwargs', 'filter_kwargs', (['module.OpticalFlow', 'model_kwargs'], {}), '(module.OpticalFlow, model_kwargs)\n', (1628, 1662), False, 'from utils.model import filter_kwargs, import_module\n'), ((2967, 3015), 'numpy.searchsorted', 'np.searchsorted', (['image_ts', '[cfg.start, cfg.stop]'], {}), '(image_ts, [cfg.start, cfg.stop])\n', (2982, 3015), True, 'import numpy as np\n'), ((3147, 3176), 're.sub', 're.sub', (['"""\\\\d+$"""', '""""""', 'seq_name'], {}), "('\\\\d+$', '', seq_name)\n", (3153, 3176), False, 'import re\n'), ((3826, 4011), 'utils.testing.evaluate', 'evaluate', (['of', 'dataset.events', 'dataset.frames', 'dataset.gt'], {'is_car': 'dataset.is_car', 'event_preproc_fun': 'event_preproc_fun', 'pred_postproc_fun': 'None', 'gt_proc_fun': 'gt_proc_fun', 'log': '(False)'}), '(of, dataset.events, 
dataset.frames, dataset.gt, is_car=dataset.\n is_car, event_preproc_fun=event_preproc_fun, pred_postproc_fun=None,\n gt_proc_fun=gt_proc_fun, log=False)\n', (3834, 4011), False, 'from utils.testing import evaluate, read_config, ravel_config\n'), ((4497, 4547), 'utils.testing.read_config', 'read_config', (["(script_dir / 'config' / 'testing.yml')"], {}), "(script_dir / 'config' / 'testing.yml')\n", (4508, 4547), False, 'from utils.testing import evaluate, read_config, ravel_config\n'), ((5966, 5988), 'utils.serializer.Serializer', 'Serializer', (['args.model'], {}), '(args.model)\n', (5976, 5988), False, 'from utils.serializer import Serializer\n'), ((6048, 6090), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (6058, 6090), False, 'import torch\n'), ((7896, 7918), 'utils.serializer.Serializer', 'Serializer', (['args.model'], {}), '(args.model)\n', (7906, 7918), False, 'from utils.serializer import Serializer\n'), ((8177, 8235), 'torch.utils.tensorboard.SummaryWriter', 'torch.utils.tensorboard.SummaryWriter', (["(args.output / 'log')"], {}), "(args.output / 'log')\n", (8214, 8235), False, 'import torch\n'), ((888, 910), 'utils.serializer.Serializer', 'Serializer', (['args.model'], {}), '(args.model)\n', (898, 910), False, 'from utils.serializer import Serializer\n'), ((2084, 2149), 'numpy.array', 'np.array', (["data['davis']['left']['image_raw_ts']"], {'dtype': 'np.float64'}), "(data['davis']['left']['image_raw_ts'], dtype=np.float64)\n", (2092, 2149), True, 'import numpy as np\n'), ((5833, 5856), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (5844, 5856), False, 'import pickle\n'), ((7935, 7975), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'step': 's'}), '(step=s, **args.__dict__)\n', (7950, 7975), False, 'from types import SimpleNamespace\n'), ((8041, 8081), 'multiprocessing.Pool', 'multiprocessing.Pool', (['args.tests_per_gpu'], {}), '(args.tests_per_gpu)\n', (8061, 
8081), False, 'import multiprocessing\n'), ((669, 695), 'utils.options.add_test_arguments', 'add_test_arguments', (['parser'], {}), '(parser)\n', (687, 695), False, 'from utils.options import add_test_arguments, validate_test_args\n'), ((1185, 1207), 'utils.serializer.Serializer', 'Serializer', (['args.model'], {}), '(args.model)\n', (1195, 1207), False, 'from utils.serializer import Serializer\n'), ((1977, 2036), 'numpy.array', 'np.array', (["data['davis']['left']['events']"], {'dtype': 'np.float64'}), "(data['davis']['left']['events'], dtype=np.float64)\n", (1985, 2036), True, 'import numpy as np\n'), ((2477, 2491), 'utils.data.EventCrop', 'EventCrop', (['box'], {}), '(box)\n', (2486, 2491), False, 'from utils.data import central_shift, EventCrop, ImageCrop\n'), ((2493, 2507), 'utils.data.ImageCrop', 'ImageCrop', (['box'], {}), '(box)\n', (2502, 2507), False, 'from utils.data import central_shift, EventCrop, ImageCrop\n'), ((4975, 5005), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'name': 'seq_name'}), '(name=seq_name)\n', (4990, 5005), False, 'from types import SimpleNamespace\n'), ((5304, 5328), 'utils.testing.ravel_config', 'ravel_config', (['seq_config'], {}), '(seq_config)\n', (5316, 5328), False, 'from utils.testing import evaluate, read_config, ravel_config\n'), ((8399, 8413), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8410, 8413), False, 'import pickle\n'), ((2413, 2447), 'utils.data.central_shift', 'central_shift', (['imshape', 'test_shape'], {}), '(imshape, test_shape)\n', (2426, 2447), False, 'from utils.data import central_shift, EventCrop, ImageCrop\n'), ((4355, 4369), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4359, 4369), False, 'from pathlib import Path\n'), ((6848, 6873), 'time.sleep', 'time.sleep', (['self._timeout'], {}), '(self._timeout)\n', (6858, 6873), False, 'import time\n')] |
import numpy as np
import numpy.random as nr
from rlkit.exploration_strategies.base import RawExplorationStrategy
from rlkit.core.serializable import Serializable
class OUStrategy(RawExplorationStrategy, Serializable):
    """
    This strategy implements the Ornstein-Uhlenbeck process, which adds
    time-correlated noise to the actions taken by the deterministic policy.
    The OU process satisfies the following stochastic differential equation:
    dxt = theta*(mu - xt)*dt + sigma*dWt
    where Wt denotes the Wiener process
    Based on the rllab implementation.
    """

    def __init__(
            self,
            action_space,
            mu=0,
            theta=0.15,
            max_sigma=0.3,
            min_sigma=0.3,
            decay_period=100000,
    ):
        """
        Args:
            action_space: Box-like space with 1-D ``low``/``high`` bounds.
            mu: long-running mean the process reverts to.
            theta: mean-reversion rate.
            max_sigma: initial noise scale.
            min_sigma: final noise scale after ``decay_period`` steps;
                ``None`` defaults to ``max_sigma``.
            decay_period: number of steps over which sigma anneals linearly.
        """
        assert len(action_space.shape) == 1
        Serializable.quick_init(self, locals())
        # Fix: this default used to be applied twice (a second, identical
        # `if min_sigma is None` block was redundant and has been removed).
        if min_sigma is None:
            min_sigma = max_sigma
        self.mu = mu
        self.theta = theta
        self.sigma = max_sigma
        self._max_sigma = max_sigma
        self._min_sigma = min_sigma
        self._decay_period = decay_period
        self.dim = np.prod(action_space.low.shape)
        self.low = action_space.low
        self.high = action_space.high
        self.state = np.ones(self.dim) * self.mu
        self.reset()

    def reset(self):
        """Reset the internal OU state back to the mean ``mu``."""
        self.state = np.ones(self.dim) * self.mu

    def evolve_state(self):
        """Advance the OU process one step and return the new state."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
        self.state = x + dx
        return self.state

    def get_action_from_raw_action(self, action, t=0, **kwargs):
        """Return ``action`` perturbed by OU noise, clipped to action bounds.

        ``sigma`` decays linearly from ``max_sigma`` to ``min_sigma`` over
        ``decay_period`` timesteps ``t``.
        """
        ou_state = self.evolve_state()
        self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
            1.0, t * 1.0 / self._decay_period
        )
        return np.clip(action + ou_state, self.low, self.high)

    def get_actions_from_raw_actions(self, actions, t=0, **kwargs):
        """Add OU-style noise to a batch of actions and clip to bounds.

        Note: unlike the scalar variant, this does not update ``self.state``
        or decay ``sigma``.
        """
        noise = (
            self.state
            + self.theta * (self.mu - self.state)
            + self.sigma * nr.randn(*actions.shape)
        )
        return np.clip(actions + noise, self.low, self.high)
| [
"numpy.clip",
"numpy.prod",
"numpy.random.randn",
"numpy.ones"
] | [((1187, 1218), 'numpy.prod', 'np.prod', (['action_space.low.shape'], {}), '(action_space.low.shape)\n', (1194, 1218), True, 'import numpy as np\n'), ((1870, 1917), 'numpy.clip', 'np.clip', (['(action + ou_state)', 'self.low', 'self.high'], {}), '(action + ou_state, self.low, self.high)\n', (1877, 1917), True, 'import numpy as np\n'), ((2155, 2200), 'numpy.clip', 'np.clip', (['(actions + noise)', 'self.low', 'self.high'], {}), '(actions + noise, self.low, self.high)\n', (2162, 2200), True, 'import numpy as np\n'), ((1314, 1331), 'numpy.ones', 'np.ones', (['self.dim'], {}), '(self.dim)\n', (1321, 1331), True, 'import numpy as np\n'), ((1406, 1423), 'numpy.ones', 'np.ones', (['self.dim'], {}), '(self.dim)\n', (1413, 1423), True, 'import numpy as np\n'), ((2105, 2129), 'numpy.random.randn', 'nr.randn', (['*actions.shape'], {}), '(*actions.shape)\n', (2113, 2129), True, 'import numpy.random as nr\n')] |
#!/usr/bin/env python
import rospy
import numpy as np
from state_visualizer import CostmapVisualizer
from neuro_local_planner_wrapper.msg import Transition
# Global viewer instance shared with the subscriber callback below
# (module-level global: not ideal, but works for this small node).
viewer = CostmapVisualizer()
def callback(data):
    """Push the transition's 4-channel state representation to the viewer."""
    if data.is_episode_finished:
        return
    # Invert values (100 - v) and reshape to 4 transposed 84x84 planes.
    inverted = 100 - np.asarray(data.state_representation)
    planes = inverted.reshape(4, 84, 84).swapaxes(1, 2)
    # Move channels last, as expected by the visualizer.
    planes = np.rollaxis(planes, 0, 3)
    # Wrap as a state batch with a single element.
    viewer.set_data(np.expand_dims(planes, axis=0))
def main():
    """Start the ROS node, subscribe to transitions, and keep rendering."""
    rospy.init_node("neuro_input_visualizer", anonymous=False)
    rospy.Subscriber("/move_base/NeuroLocalPlannerWrapper/transition",
                     Transition, callback)
    while not rospy.is_shutdown():
        viewer.run()
# Start the visualizer node only when executed directly.
if __name__ == '__main__':
    main()
| [
"rospy.is_shutdown",
"rospy.init_node",
"numpy.asarray",
"numpy.rollaxis",
"numpy.expand_dims",
"rospy.Subscriber",
"state_visualizer.CostmapVisualizer"
] | [((209, 228), 'state_visualizer.CostmapVisualizer', 'CostmapVisualizer', ([], {}), '()\n', (226, 228), False, 'from state_visualizer import CostmapVisualizer\n'), ((649, 707), 'rospy.init_node', 'rospy.init_node', (['"""neuro_input_visualizer"""'], {'anonymous': '(False)'}), "('neuro_input_visualizer', anonymous=False)\n", (664, 707), False, 'import rospy\n'), ((726, 818), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/move_base/NeuroLocalPlannerWrapper/transition"""', 'Transition', 'callback'], {}), "('/move_base/NeuroLocalPlannerWrapper/transition',\n Transition, callback)\n", (742, 818), False, 'import rospy\n'), ((307, 371), 'numpy.asarray', 'np.asarray', (['[(100 - data) for data in data.state_representation]'], {}), '([(100 - data) for data in data.state_representation])\n', (317, 371), True, 'import numpy as np\n'), ((452, 478), 'numpy.rollaxis', 'np.rollaxis', (['data_3d', '(0)', '(3)'], {}), '(data_3d, 0, 3)\n', (463, 478), True, 'import numpy as np\n'), ((565, 596), 'numpy.expand_dims', 'np.expand_dims', (['data_3d'], {'axis': '(0)'}), '(data_3d, axis=0)\n', (579, 596), True, 'import numpy as np\n'), ((829, 848), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (846, 848), False, 'import rospy\n')] |
"""Test distributed save and load."""
import subprocess
import tempfile
import unittest
import jax
import jax.numpy as jnp
import numpy as np
import optax
from alpa import (init, shutdown, DistributedArray, PipeshardParallel,
save_checkpoint, restore_checkpoint)
from alpa.device_mesh import get_global_cluster
from alpa.model.bert_model import BertConfig
from alpa.model.model_util import TrainState
from alpa.testing import (MLPModel, BertLayerModel, create_train_state,
get_bert_layer_train_step, get_mlp_train_step,
assert_allclose)
class DistSaveLoadTest(unittest.TestCase):
    def setUp(self):
        # Start an alpa runtime backed by a Ray cluster before each test.
        init(cluster="ray")
    def tearDown(self):
        # Shut the alpa runtime down so tests do not leak cluster state.
        shutdown()
def check_dist_array_eq(self, x, y):
if isinstance(x, DistributedArray):
x = np.array(
x.device_mesh.get_remote_buffers(x.remote_buffers,
batching=True))
if isinstance(y, DistributedArray):
y = np.array(
y.device_mesh.get_remote_buffers(y.remote_buffers,
batching=True))
assert_allclose(x, y)
def _get_efs_mount_point(self):
# Hacky function to get the EFS mount point
for line in subprocess.check_output("df -h", shell=True).decode().split('\n'):
cols = line.split(' ')
if "efs" in cols[0]:
return cols[-1]+"/"
return None
def _get_save_prefix(self):
device_cluster = get_global_cluster()
if len(device_cluster.host_info) > 1:
# Get EFS mount point for the multi-host test
save_prefix = self._get_efs_mount_point()
if save_prefix is None:
self.skipTest("The multi-host test requires a mounted EFS! ")
else:
# Use tmp dir for the single-host test
save_prefix = "/tmp/"
return save_prefix
    def test_distributed_array_save_load(self):
        """Round-trip a DistributedArray through save/load with a resharding:
        saved with a [2, 2] tile spec, reloaded with a column-replicated one,
        verifying the per-device buffers in both layouts."""
        device_cluster = get_global_cluster()
        save_prefix = self._get_save_prefix()
        # Launch a device mesh contains four devices
        if device_cluster.num_devices < 4:
            self.skipTest(
                "This unit test requires a cluster with at least 4 devices! ")
        host_num = min(len(device_cluster.host_info), 4)
        device_per_host = 4 // host_num
        physical_mesh = device_cluster.get_physical_mesh(
            list(range(host_num)), device_per_host)
        logical_mesh = physical_mesh.get_logical_mesh([2, 2])
        global_input_shape = (4, 2)
        num = np.prod(np.array(global_input_shape))
        # Build DistributedArray to be saved, tiled along both axes:
        # [[0,1],        [[0],  [[1],
        #  [2,3],  shard  [2]]   [3]]
        #  [4,5],  ====> [[4],  [[5],
        #  [6,7]]         [6]]   [7]]
        global_input_data1 = jnp.arange(num).reshape(global_input_shape)
        input_sharding_spec = logical_mesh.make_tile_spec(
            global_input_data1, [0, 1], [0, 1])
        input_indices = input_sharding_spec.indices(
            global_input_data1.shape).flatten()
        (dist_input_data1,) = physical_mesh.shard_args_to_arrays(
            (jax.ShapedArray(global_input_data1.shape, jnp.int32),),
            (input_indices,), (input_sharding_spec,), (global_input_data1,))
        # Check the DistributedArray's remote buffers
        desired_buffers1 = np.array([[[0], [2]], [[1], [3]], [[4], [6]],
                                      [[5], [7]]])
        self.check_dist_array_eq(desired_buffers1, dist_input_data1)
        # Save the DistributedArray (one replica only)
        tmpdir = tempfile.TemporaryDirectory(prefix=save_prefix)
        subprocess.run(["rm", "-rf", tmpdir.name])
        dist_input_data1.save(tmpdir.name)
        # Load previously saved DistributedArray with a different shardingSpec
        # (tiled along rows only, columns replicated):
        # [[0,1],        [[0,1],  [[0,1],
        #  [2,3],  shard  [2,3]]   [2,3]]
        #  [4,5],  ====> [[4,5],  [[4,5],
        #  [6,7]]         [6,7]]   [6,7]]
        load_sharding_spec = logical_mesh.make_tile_spec(
            global_input_data1, [0, 1], [0])
        dist_load_data1 = DistributedArray.load(
            tmpdir.name, jax.ShapedArray(global_input_data1.shape, jnp.int32),
            physical_mesh, load_sharding_spec)
        # Check the DistributedArray's remote buffers
        desired_buffers2 = np.array([[[0, 1], [2, 3]], [[0, 1], [2, 3]],
                                      [[4, 5], [6, 7]], [[4, 5], [6, 7]]])
        self.check_dist_array_eq(desired_buffers2, dist_load_data1)
        # Cleanup
        physical_mesh.shutdown()
    def test_jax_mlp_save_dist_load(self):
        """Save a plain (non-distributed) jax train state, restore it into a
        pipeshard-parallel executable, and check one training step matches the
        serial baseline."""
        save_prefix = self._get_save_prefix()
        # Init model and optimizer
        batch_size = 64
        hidden_dim = 16
        input_dim = output_dim = hidden_dim
        model = MLPModel(hidden_dim=hidden_dim,
                         output_dim=output_dim,
                         manual_pipeline_layer=True)
        # Init batch args
        rngkey = jax.random.PRNGKey(0)
        x = jax.random.normal(rngkey, (batch_size, input_dim), jnp.float32)
        y = jax.random.normal(rngkey, (batch_size, output_dim), jnp.float32)
        batch = {'x': x, 'y': y}
        jax_state = create_train_state(rngkey, model, [x])
        with tempfile.TemporaryDirectory(prefix=save_prefix) as ckpt_dir:
            # save normal jax model using tensorstore for distributed loading
            save_checkpoint(ckpt_dir, jax_state, 1)
            # Compile both a serial and a pipeshard-parallel train step
            method = PipeshardParallel(num_micro_batches=2)
            serial_train_step = get_mlp_train_step(None, None, None, False)
            parallel_train_step = get_mlp_train_step(method, True, False, False)
            executable = parallel_train_step.get_executable(jax_state, batch)
            # Restore checkpoint using the executable's sharding layout
            state_ss, _ = executable.get_load_info()
            load_state = restore_checkpoint(ckpt_dir, 1, state_ss)
            # Run one step on both paths after loading
            serial_state = serial_train_step(jax_state, batch)[0]
            load_state = parallel_train_step(load_state, batch)[0]
            # Check results
            assert_allclose(serial_state.params, load_state.params, 1e-3, 1e-3)
def test_distributed_mlp_save_load(self):
    """Run one MLP train step serially and in parallel, checkpoint the
    parallel state, restore it, and verify a further step still matches
    the serial run within 1e-3 tolerance."""
    save_prefix = self._get_save_prefix()
    # Init model and optimizer
    batch_size = 64
    hidden_dim = 16
    input_dim = output_dim = hidden_dim
    model = MLPModel(hidden_dim=hidden_dim,
                     output_dim=output_dim,
                     manual_pipeline_layer=True)
    # Init batch args (same PRNG key for x and y keeps the test deterministic)
    rngkey = jax.random.PRNGKey(0)
    x = jax.random.normal(rngkey, (batch_size, input_dim), jnp.float32)
    y = jax.random.normal(rngkey, (batch_size, output_dim), jnp.float32)
    batch = {'x': x, 'y': y}
    state = create_train_state(rngkey, model, [x])
    # Compile both a serial and a pipeshard-parallel train step
    method = PipeshardParallel(num_micro_batches=2)
    serial_train_step = get_mlp_train_step(None, None, None, False)
    parallel_train_step = get_mlp_train_step(method, True, False, False)
    executable = parallel_train_step.get_executable(state, batch)
    # Run one step before save; both paths must already agree
    serial_state = state
    parallel_state = state
    serial_state = serial_train_step(serial_state, batch)[0]
    parallel_state = parallel_train_step(parallel_state, batch)[0]
    assert_allclose(serial_state.params, parallel_state.params, 1e-3, 1e-3)
    with tempfile.TemporaryDirectory(prefix=save_prefix) as ckpt_dir:
        # Save checkpoint of the distributed (parallel) state
        save_checkpoint(ckpt_dir, parallel_state, 1)
        # Restore checkpoint using the executable's sharding information
        state_ss, _ = executable.get_load_info()
        load_state = restore_checkpoint(ckpt_dir, 1, state_ss)
        # Run one more step after load on both paths
        serial_state = serial_train_step(serial_state, batch)[0]
        load_state = parallel_train_step(load_state, batch)[0]
        # Check results
        assert_allclose(serial_state.params, load_state.params, 1e-3, 1e-3)
def test_distributed_bert_save_load(self):
    """Same save/restore round-trip as the MLP test, but for a 4-layer
    BERT model: one step serial vs. parallel, checkpoint, restore, and
    check a further step matches within 1e-3 tolerance."""
    save_prefix = self._get_save_prefix()
    # Config
    batch_size = 16
    seq_len = 8
    hidden_size = 128
    num_heads = 8
    n_layers = 4
    dtype = jnp.float32
    # Init batch args (same PRNG key for x and y keeps the test deterministic)
    rngkey = jax.random.PRNGKey(0)
    x = jax.random.normal(rngkey, (batch_size, seq_len, hidden_size),
                          dtype=dtype)
    y = jax.random.normal(rngkey, (batch_size, seq_len, hidden_size),
                          dtype=dtype)
    attention_mask = jnp.ones((batch_size, seq_len), dtype=dtype)
    batch = {"x": x, "y": y, "attention_mask": attention_mask}
    # Init model and optimizer
    model = BertLayerModel(config=BertConfig(hidden_size=hidden_size,
                                           intermediate_size=hidden_size *
                                           4,
                                           num_attention_heads=num_heads,
                                           num_hidden_layers=n_layers))
    rngkey = jax.random.PRNGKey(0)
    params = model.init(rngkey, x, attention_mask)
    tx = optax.sgd(learning_rate=1e-2)
    state = TrainState.create(apply_fn=model.apply,
                              params=params,
                              tx=tx,
                              dynamic_scale=None)
    # Compile both a serial and a pipeshard-parallel train step
    method = PipeshardParallel(num_micro_batches=2)
    serial_train_step = get_bert_layer_train_step(None, None, None,
                                                  n_layers, False)
    parallel_train_step = get_bert_layer_train_step(method, True, False,
                                                    n_layers, False)
    executable = parallel_train_step.get_executable(state, batch)
    # Run one step before save; both paths must already agree
    serial_state = state
    parallel_state = state
    serial_state = serial_train_step(serial_state, batch)[0]
    parallel_state = parallel_train_step(parallel_state, batch)[0]
    assert_allclose(serial_state.params, parallel_state.params, 1e-3, 1e-3)
    with tempfile.TemporaryDirectory(prefix=save_prefix) as ckpt_dir:
        # Save checkpoint of the distributed (parallel) state
        save_checkpoint(ckpt_dir, parallel_state, 1)
        # Restore checkpoint using the executable's sharding information
        state_ss, _ = executable.get_load_info()
        load_state = restore_checkpoint(ckpt_dir, 1, state_ss)
        # Run one more step after load on both paths
        serial_state = serial_train_step(serial_state, batch)[0]
        load_state = parallel_train_step(load_state, batch)[0]
        # Check results
        assert_allclose(serial_state.params, load_state.params, 1e-3, 1e-3)
def suite():
    """Build the test suite for the distributed save/load tests.

    Returns:
        unittest.TestSuite containing the four DistSaveLoadTest cases in a
        fixed, deterministic order.
    """
    test_names = (
        "test_distributed_array_save_load",
        "test_jax_mlp_save_dist_load",
        "test_distributed_mlp_save_load",
        "test_distributed_bert_save_load",
    )
    # Use a distinct local name: the original shadowed this function's own
    # name with its local `suite` variable.
    s = unittest.TestSuite()
    s.addTests(DistSaveLoadTest(name) for name in test_names)
    return s
if __name__ == "__main__":
    # Execute the curated suite with the default text runner.
    unittest.TextTestRunner().run(suite())
| [
"alpa.testing.get_bert_layer_train_step",
"numpy.array",
"alpa.shutdown",
"unittest.TextTestRunner",
"unittest.TestSuite",
"jax.random.PRNGKey",
"alpa.testing.assert_allclose",
"subprocess.run",
"jax.random.normal",
"alpa.device_mesh.get_global_cluster",
"subprocess.check_output",
"alpa.testin... | [((10982, 11002), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (11000, 11002), False, 'import unittest\n'), ((11342, 11367), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (11365, 11367), False, 'import unittest\n'), ((684, 703), 'alpa.init', 'init', ([], {'cluster': '"""ray"""'}), "(cluster='ray')\n", (688, 703), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((737, 747), 'alpa.shutdown', 'shutdown', ([], {}), '()\n', (745, 747), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((1202, 1223), 'alpa.testing.assert_allclose', 'assert_allclose', (['x', 'y'], {}), '(x, y)\n', (1217, 1223), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((1582, 1602), 'alpa.device_mesh.get_global_cluster', 'get_global_cluster', ([], {}), '()\n', (1600, 1602), False, 'from alpa.device_mesh import get_global_cluster\n'), ((2075, 2095), 'alpa.device_mesh.get_global_cluster', 'get_global_cluster', ([], {}), '()\n', (2093, 2095), False, 'from alpa.device_mesh import get_global_cluster\n'), ((3484, 3542), 'numpy.array', 'np.array', (['[[[0], [2]], [[1], [3]], [[4], [6]], [[5], [7]]]'], {}), '([[[0], [2]], [[1], [3]], [[4], [6]], [[5], [7]]])\n', (3492, 3542), True, 'import numpy as np\n'), ((3722, 3769), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': 'save_prefix'}), '(prefix=save_prefix)\n', (3749, 3769), False, 'import tempfile\n'), ((3778, 3820), 'subprocess.run', 'subprocess.run', (["['rm', '-rf', tmpdir.name]"], {}), "(['rm', '-rf', tmpdir.name])\n", (3792, 3820), False, 'import subprocess\n'), ((4480, 4566), 'numpy.array', 'np.array', (['[[[0, 1], [2, 3]], [[0, 1], [2, 3]], [[4, 5], [6, 7]], [[4, 5], [6, 7]]]'], {}), '([[[0, 1], [2, 3]], [[0, 1], [2, 3]], 
[[4, 5], [6, 7]], [[4, 5], [6,\n 7]]])\n', (4488, 4566), True, 'import numpy as np\n'), ((4958, 5044), 'alpa.testing.MLPModel', 'MLPModel', ([], {'hidden_dim': 'hidden_dim', 'output_dim': 'output_dim', 'manual_pipeline_layer': '(True)'}), '(hidden_dim=hidden_dim, output_dim=output_dim,\n manual_pipeline_layer=True)\n', (4966, 5044), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((5135, 5156), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (5153, 5156), False, 'import jax\n'), ((5169, 5232), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, input_dim)', 'jnp.float32'], {}), '(rngkey, (batch_size, input_dim), jnp.float32)\n', (5186, 5232), False, 'import jax\n'), ((5245, 5309), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, output_dim)', 'jnp.float32'], {}), '(rngkey, (batch_size, output_dim), jnp.float32)\n', (5262, 5309), False, 'import jax\n'), ((5363, 5401), 'alpa.testing.create_train_state', 'create_train_state', (['rngkey', 'model', '[x]'], {}), '(rngkey, model, [x])\n', (5381, 5401), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((6277, 6346), 'alpa.testing.assert_allclose', 'assert_allclose', (['serial_state.params', 'load_state.params', '(0.001)', '(0.001)'], {}), '(serial_state.params, load_state.params, 0.001, 0.001)\n', (6292, 6346), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((6583, 6669), 'alpa.testing.MLPModel', 'MLPModel', ([], {'hidden_dim': 'hidden_dim', 'output_dim': 'output_dim', 'manual_pipeline_layer': '(True)'}), '(hidden_dim=hidden_dim, output_dim=output_dim,\n manual_pipeline_layer=True)\n', (6591, 6669), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, 
get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((6760, 6781), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (6778, 6781), False, 'import jax\n'), ((6794, 6857), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, input_dim)', 'jnp.float32'], {}), '(rngkey, (batch_size, input_dim), jnp.float32)\n', (6811, 6857), False, 'import jax\n'), ((6870, 6934), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, output_dim)', 'jnp.float32'], {}), '(rngkey, (batch_size, output_dim), jnp.float32)\n', (6887, 6934), False, 'import jax\n'), ((6984, 7022), 'alpa.testing.create_train_state', 'create_train_state', (['rngkey', 'model', '[x]'], {}), '(rngkey, model, [x])\n', (7002, 7022), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((7059, 7097), 'alpa.PipeshardParallel', 'PipeshardParallel', ([], {'num_micro_batches': '(2)'}), '(num_micro_batches=2)\n', (7076, 7097), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((7126, 7169), 'alpa.testing.get_mlp_train_step', 'get_mlp_train_step', (['None', 'None', 'None', '(False)'], {}), '(None, None, None, False)\n', (7144, 7169), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((7200, 7246), 'alpa.testing.get_mlp_train_step', 'get_mlp_train_step', (['method', '(True)', '(False)', '(False)'], {}), '(method, True, False, False)\n', (7218, 7246), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((7548, 7621), 'alpa.testing.assert_allclose', 'assert_allclose', (['serial_state.params', 'parallel_state.params', '(0.001)', '(0.001)'], {}), '(serial_state.params, parallel_state.params, 0.001, 0.001)\n', (7563, 
7621), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((8135, 8204), 'alpa.testing.assert_allclose', 'assert_allclose', (['serial_state.params', 'load_state.params', '(0.001)', '(0.001)'], {}), '(serial_state.params, load_state.params, 0.001, 0.001)\n', (8150, 8204), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((8500, 8521), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (8518, 8521), False, 'import jax\n'), ((8534, 8608), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, seq_len, hidden_size)'], {'dtype': 'dtype'}), '(rngkey, (batch_size, seq_len, hidden_size), dtype=dtype)\n', (8551, 8608), False, 'import jax\n'), ((8651, 8725), 'jax.random.normal', 'jax.random.normal', (['rngkey', '(batch_size, seq_len, hidden_size)'], {'dtype': 'dtype'}), '(rngkey, (batch_size, seq_len, hidden_size), dtype=dtype)\n', (8668, 8725), False, 'import jax\n'), ((8781, 8825), 'jax.numpy.ones', 'jnp.ones', (['(batch_size, seq_len)'], {'dtype': 'dtype'}), '((batch_size, seq_len), dtype=dtype)\n', (8789, 8825), True, 'import jax.numpy as jnp\n'), ((9311, 9332), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (9329, 9332), False, 'import jax\n'), ((9401, 9430), 'optax.sgd', 'optax.sgd', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (9410, 9430), False, 'import optax\n'), ((9447, 9533), 'alpa.model.model_util.TrainState.create', 'TrainState.create', ([], {'apply_fn': 'model.apply', 'params': 'params', 'tx': 'tx', 'dynamic_scale': 'None'}), '(apply_fn=model.apply, params=params, tx=tx, dynamic_scale\n =None)\n', (9464, 9533), False, 'from alpa.model.model_util import TrainState\n'), ((9667, 9705), 'alpa.PipeshardParallel', 'PipeshardParallel', ([], {'num_micro_batches': '(2)'}), '(num_micro_batches=2)\n', (9684, 9705), 
False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((9734, 9794), 'alpa.testing.get_bert_layer_train_step', 'get_bert_layer_train_step', (['None', 'None', 'None', 'n_layers', '(False)'], {}), '(None, None, None, n_layers, False)\n', (9759, 9794), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((9879, 9942), 'alpa.testing.get_bert_layer_train_step', 'get_bert_layer_train_step', (['method', '(True)', '(False)', 'n_layers', '(False)'], {}), '(method, True, False, n_layers, False)\n', (9904, 9942), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((10300, 10373), 'alpa.testing.assert_allclose', 'assert_allclose', (['serial_state.params', 'parallel_state.params', '(0.001)', '(0.001)'], {}), '(serial_state.params, parallel_state.params, 0.001, 0.001)\n', (10315, 10373), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((10887, 10956), 'alpa.testing.assert_allclose', 'assert_allclose', (['serial_state.params', 'load_state.params', '(0.001)', '(0.001)'], {}), '(serial_state.params, load_state.params, 0.001, 0.001)\n', (10902, 10956), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((2673, 2701), 'numpy.array', 'np.array', (['global_input_shape'], {}), '(global_input_shape)\n', (2681, 2701), True, 'import numpy as np\n'), ((4297, 4349), 'jax.ShapedArray', 'jax.ShapedArray', (['global_input_data1.shape', 'jnp.int32'], {}), '(global_input_data1.shape, jnp.int32)\n', (4312, 4349), False, 'import jax\n'), ((5417, 5464), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': 'save_prefix'}), 
'(prefix=save_prefix)\n', (5444, 5464), False, 'import tempfile\n'), ((5568, 5607), 'alpa.save_checkpoint', 'save_checkpoint', (['ckpt_dir', 'jax_state', '(1)'], {}), '(ckpt_dir, jax_state, 1)\n', (5583, 5607), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((5652, 5690), 'alpa.PipeshardParallel', 'PipeshardParallel', ([], {'num_micro_batches': '(2)'}), '(num_micro_batches=2)\n', (5669, 5690), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((5723, 5766), 'alpa.testing.get_mlp_train_step', 'get_mlp_train_step', (['None', 'None', 'None', '(False)'], {}), '(None, None, None, False)\n', (5741, 5766), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((5801, 5847), 'alpa.testing.get_mlp_train_step', 'get_mlp_train_step', (['method', '(True)', '(False)', '(False)'], {}), '(method, True, False, False)\n', (5819, 5847), False, 'from alpa.testing import MLPModel, BertLayerModel, create_train_state, get_bert_layer_train_step, get_mlp_train_step, assert_allclose\n'), ((6039, 6080), 'alpa.restore_checkpoint', 'restore_checkpoint', (['ckpt_dir', '(1)', 'state_ss'], {}), '(ckpt_dir, 1, state_ss)\n', (6057, 6080), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((7634, 7681), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': 'save_prefix'}), '(prefix=save_prefix)\n', (7661, 7681), False, 'import tempfile\n'), ((7737, 7781), 'alpa.save_checkpoint', 'save_checkpoint', (['ckpt_dir', 'parallel_state', '(1)'], {}), '(ckpt_dir, parallel_state, 1)\n', (7752, 7781), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((7894, 7935), 'alpa.restore_checkpoint', 'restore_checkpoint', 
(['ckpt_dir', '(1)', 'state_ss'], {}), '(ckpt_dir, 1, state_ss)\n', (7912, 7935), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((10386, 10433), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': 'save_prefix'}), '(prefix=save_prefix)\n', (10413, 10433), False, 'import tempfile\n'), ((10489, 10533), 'alpa.save_checkpoint', 'save_checkpoint', (['ckpt_dir', 'parallel_state', '(1)'], {}), '(ckpt_dir, parallel_state, 1)\n', (10504, 10533), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((10646, 10687), 'alpa.restore_checkpoint', 'restore_checkpoint', (['ckpt_dir', '(1)', 'state_ss'], {}), '(ckpt_dir, 1, state_ss)\n', (10664, 10687), False, 'from alpa import init, shutdown, DistributedArray, PipeshardParallel, save_checkpoint, restore_checkpoint\n'), ((2938, 2953), 'jax.numpy.arange', 'jnp.arange', (['num'], {}), '(num)\n', (2948, 2953), True, 'import jax.numpy as jnp\n'), ((3269, 3321), 'jax.ShapedArray', 'jax.ShapedArray', (['global_input_data1.shape', 'jnp.int32'], {}), '(global_input_data1.shape, jnp.int32)\n', (3284, 3321), False, 'import jax\n'), ((8967, 9100), 'alpa.model.bert_model.BertConfig', 'BertConfig', ([], {'hidden_size': 'hidden_size', 'intermediate_size': '(hidden_size * 4)', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'n_layers'}), '(hidden_size=hidden_size, intermediate_size=hidden_size * 4,\n num_attention_heads=num_heads, num_hidden_layers=n_layers)\n', (8977, 9100), False, 'from alpa.model.bert_model import BertConfig\n'), ((1333, 1377), 'subprocess.check_output', 'subprocess.check_output', (['"""df -h"""'], {'shell': '(True)'}), "('df -h', shell=True)\n", (1356, 1377), False, 'import subprocess\n')] |
# -*- coding: utf-8 -*-
"""
Recriação do Jogo da Velha
@author: Prof. <NAME>
"""
import pygame
import sys
import os
import traceback
import random
import numpy as np
import copy
# Import - Inicialização da arvore e busca em profundidade
import tree_dfs
class GameConstants:
    """Static configuration shared by the whole game: colors, screen and
    grid geometry, timing, and the game-tree root node."""
    #                      R    G    B
    ColorWhite     = (255, 255, 255)
    ColorBlack     = (  0,   0,   0)
    ColorRed       = (255,   0,   0)
    ColorGreen     = (  0, 255,   0)
    ColorBlue      = (  0,   0, 255)
    ColorDarkGreen = (  0, 155,   0)
    ColorDarkGray  = ( 40,  40,  40)

    BackgroundColor = ColorBlack

    screenScale = 1
    screenWidth = screenScale*600
    screenHeight = screenScale*600

    # grid size in units
    gridWidth = 3
    gridHeight = 3

    # grid size in pixels (cell size minus margins on both sides)
    gridMarginSize = 5
    gridCellWidth = screenWidth//gridWidth - 2*gridMarginSize
    gridCellHeight = screenHeight//gridHeight - 2*gridMarginSize

    randomSeed = 0
    FPS = 30
    fontSize = 20

    #########################################
    # Root node of the game tree; rebuilt after every move in Game.update
    root_node = tree_dfs.NodeBoard()
class Game:
    """Tic-tac-toe game model: keeps a history of board states, applies
    player clicks, and rebuilds the tree_dfs game tree after each move."""

    class GameState:
        """Snapshot of the board and whose turn produced it."""

        def __init__(self):
            # Cell codes: 0 empty, 1 X, 2 O.
            # Each state owns its own grid. The original declared `grid`
            # as a class attribute, so — combined with the shallow copy in
            # update() — every entry of Game.states aliased one shared
            # array and the stored history was wrong.
            self.grid = np.zeros((GameConstants.gridHeight, GameConstants.gridWidth))
            self.currentPlayer = 0

    def __init__(self, expectUserInputs=True):
        self.expectUserInputs = expectUserInputs

        # Game state list - stores a state for each time step (initial state)
        gs = Game.GameState()
        self.states = [gs]

        # Determines if simulation is active or not
        self.alive = True
        self.currentPlayer = 1

        # Journal of inputs by users (stack)
        self.eventJournal = []

    def checkObjectiveState(self, gs):
        """Return the winner's cell code (1 or 2) if a line is complete,
        else 0."""
        # Complete line?
        for i in range(3):
            s = set(gs.grid[i, :])
            if len(s) == 1 and min(s) != 0:
                return s.pop()

        # Complete column?
        for i in range(3):
            s = set(gs.grid[:, i])
            if len(s) == 1 and min(s) != 0:
                return s.pop()

        # Complete diagonal (main)?
        s = set([gs.grid[i, i] for i in range(3)])
        if len(s) == 1 and min(s) != 0:
            return s.pop()

        # Complete diagonal (opposite)?
        s = set([gs.grid[-i-1, i] for i in range(3)])
        if len(s) == 1 and min(s) != 0:
            return s.pop()

        # nope, not an objective state
        return 0

    # Implements a game tick
    # Each call simulates a world step
    def update(self):
        # If the game is done or there is no event, do nothing
        if not self.alive or not self.eventJournal:
            return

        # Get the current (last) game state. Copy the grid explicitly so
        # the new state does not share (and silently rewrite) the previous
        # state's board.
        gs = copy.copy(self.states[-1])
        gs.grid = self.states[-1].grid.copy()

        # Switch player turn
        if gs.currentPlayer == 0:
            gs.currentPlayer = 1
        elif gs.currentPlayer == 1:
            gs.currentPlayer = 2
        elif gs.currentPlayer == 2:
            gs.currentPlayer = 1

        # Mark the cell clicked by this player if it's an empty cell
        x, y = self.eventJournal.pop()

        # Check if in bounds. (x, y) are row/column indices, so compare
        # against the grid dimensions — the original compared against the
        # pixel cell sizes gridCellHeight/gridCellWidth.
        if x < 0 or y < 0 or x >= GameConstants.gridHeight or y >= GameConstants.gridWidth:
            return

        # Check if cell is empty
        if gs.grid[x][y] == 0:
            gs.grid[x][y] = gs.currentPlayer
        else:  # invalid move
            return

        # Check if end of game
        if self.checkObjectiveState(gs):
            self.alive = False

        # Add the new modified state
        self.states += [gs]

        # ====================================
        # ORACLE: rebuild the game tree from the board just played
        GameConstants.root_node = None
        GameConstants.root_node = tree_dfs.NodeBoard()
        GameConstants.root_node.setPositionsPlayed(gs.grid)

        # Build the tree for whichever player moves next
        if (gs.currentPlayer == 1):
            # Player O moves next
            tree_dfs.tree(GameConstants.root_node, 1)
        if gs.currentPlayer == 2:
            # Player X moves next
            tree_dfs.tree(GameConstants.root_node, 0)

        # Report the game status
        if (GameConstants.root_node.playerX_win):
            print("JOGADOR X VENCEU!!")
        elif(GameConstants.root_node.playerO_win):
            print("JOGADOR O VENCEU!!")
        elif(GameConstants.root_node.empate):
            print("EMPATOU!!")
        else:
            # Show win/lose/draw probabilities for the next player
            if gs.currentPlayer == 1:
                print("================================================================")
                print(f"\n  PROBABILIDADES DE JOGADAS PARA 2 O (Ganhar/Perder/Empatar)\n")
                print("================================================================\n")
                # Depth-first search over each child of the root board
                tree_dfs.probability_next_moves(GameConstants.root_node, 1)
            elif gs.currentPlayer == 2:
                print("================================================================")
                print(f"\n  PROBABILIDADES DE JOGADAS PARA 1 X (Ganhar/Perder/Empatar)\n")
                print("================================================================\n")
                # Depth-first search over each child of the root board
                tree_dfs.probability_next_moves(GameConstants.root_node, 0)
def drawGrid(screen, game):
    """Draw the 3x3 board for the latest game state.

    Cells are white when empty, red for player 1 (X) and blue for
    player 2 (O).

    :param screen: pygame display surface to draw onto
    :param game: Game instance; only ``game.states[-1].grid`` is read
    :return: list of dirty rects (background fill first, then one per
        cell) suitable for pygame.display.update
    """
    # The background fill is the first dirty rect. (The original assigned
    # rects = [] and immediately overwrote it; that dead line is removed.)
    rects = [screen.fill(GameConstants.BackgroundColor)]

    # Get the current game state's grid
    grid = game.states[-1].grid

    # Hoist loop-invariant geometry out of the cell loop
    m = GameConstants.gridMarginSize
    w = GameConstants.gridCellWidth
    h = GameConstants.gridCellHeight

    # Draw the grid
    for row in range(GameConstants.gridHeight):
        for column in range(GameConstants.gridWidth):
            color = GameConstants.ColorWhite
            if grid[row][column] == 1:
                color = GameConstants.ColorRed
            elif grid[row][column] == 2:
                color = GameConstants.ColorBlue
            rects += [pygame.draw.rect(screen, color,
                                       [(2*m+w) * column + m, (2*m+h) * row + m, w, h])]

    return rects
def draw(screen, font, game):
    """Render one frame and return its dirty rects.

    ``font`` is accepted for interface compatibility with the main loop
    but is not used by the current grid renderer.
    """
    return list(drawGrid(screen, game))
def initialize():
    """Seed the RNG, start pygame, and build the main-loop objects.

    Returns:
        tuple: (screen, font, game, fpsClock).
    """
    random.seed(GameConstants.randomSeed)
    pygame.init()

    # Double-buffered display surface, cleared to the background color
    display_size = (GameConstants.screenWidth, GameConstants.screenHeight)
    screen = pygame.display.set_mode(display_size, pygame.DOUBLEBUF)
    screen.fill(GameConstants.BackgroundColor)

    font = pygame.font.SysFont('Courier', GameConstants.fontSize)
    return screen, font, Game(), pygame.time.Clock()
def handleEvents(game):
    """Translate pending pygame events into game actions.

    Mouse releases are converted from pixel coordinates to (row, col)
    cells and appended to ``game.eventJournal``; window close or ESC
    quits pygame and exits the process.
    """
    #gs = game.states[-1]
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONUP:
            pos = pygame.mouse.get_pos()
            # Integer-divide the pixel position by the cell size in pixels
            col = pos[0] // (GameConstants.screenWidth // GameConstants.gridWidth)
            row = pos[1] // (GameConstants.screenHeight // GameConstants.gridHeight)
            #print('clicked cell: {}, {}'.format(cellX, cellY))
            # send player action to game
            game.eventJournal.append((row, col))
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
            pygame.quit()
            sys.exit()
def mainGamePlayer():
    """Top-level game loop: initialize, then repeat the
    events -> update -> draw cycle until the game ends."""
    try:
        # Initialize pygame and etc.
        screen, font, game, fpsClock = initialize()

        # Main game loop
        while game.alive:
            # Handle events
            handleEvents(game)

            # Update world
            game.update()

            # Draw this world frame
            rects = draw(screen, font, game)
            pygame.display.update(rects)

            # Delay for required FPS
            fpsClock.tick(GameConstants.FPS)

        # close up shop
        pygame.quit()
    except SystemExit:
        # Raised by handleEvents on QUIT/ESC; exit quietly
        pass
    except Exception as e:
        # Print the traceback and still shut pygame down so the window
        # does not hang open on an unexpected error
        #print("Unexpected error:", sys.exc_info()[0])
        traceback.print_exc(file=sys.stdout)
        pygame.quit()
        #raise Exception from e
if __name__ == "__main__":
    # Set the working directory (where we expect to find files) to the same
    # directory this .py file is in. You can leave this out of your own
    # code, but it is needed to easily run the examples using "python -m"
    file_path = os.path.dirname(os.path.abspath(__file__))
    os.chdir(file_path)

    mainGamePlayer()
"pygame.init",
"pygame.quit",
"sys.exit",
"copy.copy",
"tree_dfs.NodeBoard",
"pygame.display.set_mode",
"tree_dfs.tree",
"pygame.mouse.get_pos",
"pygame.draw.rect",
"pygame.display.update",
"traceback.print_exc",
"pygame.time.Clock",
"pygame.font.SysFont",
"tree_dfs.probability_next_moves"... | [((1140, 1160), 'tree_dfs.NodeBoard', 'tree_dfs.NodeBoard', ([], {}), '()\n', (1158, 1160), False, 'import tree_dfs\n'), ((7115, 7152), 'random.seed', 'random.seed', (['GameConstants.randomSeed'], {}), '(GameConstants.randomSeed)\n', (7126, 7152), False, 'import random\n'), ((7158, 7171), 'pygame.init', 'pygame.init', ([], {}), '()\n', (7169, 7171), False, 'import pygame\n'), ((7203, 7257), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Courier"""', 'GameConstants.fontSize'], {}), "('Courier', GameConstants.fontSize)\n", (7222, 7257), False, 'import pygame\n'), ((7274, 7293), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (7291, 7293), False, 'import pygame\n'), ((7340, 7443), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(GameConstants.screenWidth, GameConstants.screenHeight)', 'pygame.DOUBLEBUF'], {}), '((GameConstants.screenWidth, GameConstants.\n screenHeight), pygame.DOUBLEBUF)\n', (7363, 7443), False, 'import pygame\n'), ((7614, 7632), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7630, 7632), False, 'import pygame\n'), ((9363, 9382), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (9371, 9382), False, 'import os\n'), ((1245, 1306), 'numpy.zeros', 'np.zeros', (['(GameConstants.gridHeight, GameConstants.gridWidth)'], {}), '((GameConstants.gridHeight, GameConstants.gridWidth))\n', (1253, 1306), True, 'import numpy as np\n'), ((2938, 2964), 'copy.copy', 'copy.copy', (['self.states[-1]'], {}), '(self.states[-1])\n', (2947, 2964), False, 'import copy\n'), ((4183, 4203), 'tree_dfs.NodeBoard', 'tree_dfs.NodeBoard', ([], {}), '()\n', (4201, 4203), False, 'import tree_dfs\n'), ((8795, 8808), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8806, 8808), False, 'import pygame\n'), ((9331, 9356), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (9346, 9356), False, 'import os\n'), ((4526, 4567), 'tree_dfs.tree', 'tree_dfs.tree', 
(['GameConstants.root_node', '(1)'], {}), '(GameConstants.root_node, 1)\n', (4539, 4567), False, 'import tree_dfs\n'), ((4692, 4733), 'tree_dfs.tree', 'tree_dfs.tree', (['GameConstants.root_node', '(0)'], {}), '(GameConstants.root_node, 0)\n', (4705, 4733), False, 'import tree_dfs\n'), ((7701, 7723), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7721, 7723), False, 'import pygame\n'), ((8176, 8189), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8187, 8189), False, 'import pygame\n'), ((8203, 8213), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8211, 8213), False, 'import sys\n'), ((8632, 8660), 'pygame.display.update', 'pygame.display.update', (['rects'], {}), '(rects)\n', (8653, 8660), False, 'import pygame\n'), ((8940, 8976), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (8959, 8976), False, 'import traceback\n'), ((8986, 8999), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8997, 8999), False, 'import pygame\n'), ((6855, 6947), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'color', '[(2 * m + w) * column + m, (2 * m + h) * row + m, w, h]'], {}), '(screen, color, [(2 * m + w) * column + m, (2 * m + h) *\n row + m, w, h])\n', (6871, 6947), False, 'import pygame\n'), ((5560, 5619), 'tree_dfs.probability_next_moves', 'tree_dfs.probability_next_moves', (['GameConstants.root_node', '(1)'], {}), '(GameConstants.root_node, 1)\n', (5591, 5619), False, 'import tree_dfs\n'), ((6061, 6120), 'tree_dfs.probability_next_moves', 'tree_dfs.probability_next_moves', (['GameConstants.root_node', '(0)'], {}), '(GameConstants.root_node, 0)\n', (6092, 6120), False, 'import tree_dfs\n')] |
import numpy as np
from inferelator.utils import Validator as check
from inferelator import utils
from inferelator.regression import base_regression
from inferelator.distributed.inferelator_mp import MPControl
from sklearn.base import BaseEstimator
from inferelator.regression.base_regression import _MultitaskRegressionWorkflowMixin
import copy
import inspect
def sklearn_gene(x, y, model, min_coef=None, **kwargs):
    """
    Use a scikit-learn model for regression on a single gene.

    :param x: Feature array
    :type x: np.ndarray [N x K]
    :param y: Response array
    :type y: np.ndarray [N x 1]
    :param model: Instance of a scikit BaseEstimator-derived model
    :type model: BaseEstimator
    :param min_coef: A minimum coefficient value to include in the model. Any values smaller will be set to 0.
    :type min_coef: numeric
    :param kwargs: Extra keyword arguments passed through to ``model.fit``
    :return: A dict of results for this gene (``pp`` predictor flags,
        ``betas`` refit coefficients, ``betas_resc`` error reductions)
    :rtype: dict
    """

    assert check.argument_type(x, np.ndarray)
    assert check.argument_type(y, np.ndarray)
    assert check.argument_is_subclass(model, BaseEstimator)

    (N, K) = x.shape

    # Fit the model
    model.fit(x, y, **kwargs)

    # Get all model coefficients [K, ]
    try:
        coefs = model.coef_
    except AttributeError:
        coefs = model.estimator_.coef_

    # Threshold a copy: the original zeroed values directly in the fitted
    # model's coef_ array, mutating the model as a side effect
    coefs = np.array(coefs)

    # Set coefficients below threshold to 0
    if min_coef is not None:
        coefs[np.abs(coefs) < min_coef] = 0.  # Threshold coefficients

    coef_nonzero = coefs != 0  # Create a boolean array where coefficients are nonzero [K, ]

    # If there are non-zero coefficients, redo the linear regression with them alone
    # And calculate beta_resc
    if coef_nonzero.sum() > 0:
        x = x[:, coef_nonzero]
        utils.make_array_2d(y)
        betas = base_regression.recalculate_betas_from_selected(x, y)
        betas_resc = base_regression.predict_error_reduction(x, y, betas)
        return dict(pp=coef_nonzero,
                    betas=betas,
                    betas_resc=betas_resc)
    else:
        # No predictor survived thresholding: flag everything and return
        # zero coefficients (downstream pileup expects full-length arrays)
        return dict(pp=np.repeat(True, K).tolist(),
                    betas=np.zeros(K),
                    betas_resc=np.zeros(K))
class SKLearnRegression(base_regression.BaseRegression):
    """Run a scikit-learn regression model gene-by-gene, either through
    MPControl.map or on a dask cluster when that engine is active."""

    def __init__(self, x, y, model, random_state=None, **kwargs):
        # Remaining keyword arguments are passed to the model constructor
        self.params = kwargs

        if random_state is not None:
            self.params["random_state"] = random_state

        # min_coef is consumed by sklearn_gene, not by the model itself
        self.min_coef = self.params.pop("min_coef", None)

        # `model` is an uninstantiated estimator class; instantiate it here
        self.model = model(**self.params)

        super(SKLearnRegression, self).__init__(x, y)

    def regress(self):
        """
        Fit the scikit-learn model for each gene.

        :return: list
            Returns a list of regression results that base_regression's pileup_data can process
        """

        # Dask has a dedicated distributed code path; return its result directly
        if MPControl.is_dask():
            from inferelator.distributed.dask_functions import sklearn_regress_dask
            return sklearn_regress_dask(self.X, self.Y, self.model, self.G, self.genes, self.min_coef)

        def regression_maker(j):
            # Print progress loudly (level 0) only every 100th gene
            level = 0 if j % 100 == 0 else 2
            utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=self.genes[j], i=j, total=self.G), level=level)

            # copy.copy gives each gene its own model instance to fit
            data = sklearn_gene(self.X.values,
                                utils.scale_vector(self.Y.get_gene_data(j, force_dense=True, flatten=True)),
                                copy.copy(self.model),
                                min_coef=self.min_coef)
            data['ind'] = j
            return data

        return MPControl.map(regression_maker, range(self.G), tell_children=False)
class SKLearnWorkflowMixin(base_regression._RegressionWorkflowMixin):
    """
    Use any scikit-learn regression module
    """

    _sklearn_model = None
    _sklearn_model_params = None
    _sklearn_add_random_state = False

    def __init__(self, *args, **kwargs):
        # Per-instance dict so instances do not share the class-level default
        self._sklearn_model_params = {}
        super(SKLearnWorkflowMixin, self).__init__(*args, **kwargs)

    def set_regression_parameters(self, model=None, add_random_state=None, **kwargs):
        """
        Set parameters to use a sklearn model for regression

        :param model: A scikit-learn model class
        :type model: BaseEstimator subclass
        :param add_random_state: Flag to include workflow random seed as "random_state" in the model
        :type add_random_state: bool
        :param kwargs: Any arguments which should be passed to the scikit-learn model class instantiation
        :type kwargs: any
        :raises ValueError: If ``model`` is an instance instead of a class
        """

        if model is not None and not inspect.isclass(model):
            # Fixed: the original message was missing its closing parenthesis
            raise ValueError("Pass an uninstantiated scikit-learn model "
                             "(i.e. LinearRegression, not LinearRegression())")

        self._set_with_warning("_sklearn_model", model)
        self._set_without_warning("_sklearn_add_random_state", add_random_state)
        self._sklearn_model_params.update(kwargs)

    def run_bootstrap(self, bootstrap):
        """Fit the configured model on one bootstrap of the design/response
        data and return the regression result."""
        x = self.design.get_bootstrap(bootstrap)
        y = self.response.get_bootstrap(bootstrap)
        utils.Debug.vprint('Calculating betas using SKLearn model {m}'.format(m=self._sklearn_model.__name__), level=0)

        return SKLearnRegression(x,
                                 y,
                                 self._sklearn_model,
                                 random_state=self.random_seed if self._sklearn_add_random_state else None,
                                 **self._sklearn_model_params).run()
class SKLearnByTaskMixin(_MultitaskRegressionWorkflowMixin, SKLearnWorkflowMixin):
    """
    Runs the scikit-learn regression separately on each task defined by the
    AMUSR regression (MTL) workflow.
    """

    def run_bootstrap(self, bootstrap_idx):
        # Collect one (beta, rescaled beta) pair per task.
        betas = []
        betas_resc = []

        for task in range(self._n_tasks):
            # Select this task's bootstrap sample for design and response.
            selection = self._task_bootstraps[task][bootstrap_idx]
            x = self._task_design[task].get_bootstrap(selection)
            y = self._task_response[task].get_bootstrap(selection)
            utils.Debug.vprint('Calculating task {k} using {n}'.format(k=task, n=self._sklearn_model.__name__), level=0)

            seed = self.random_seed if self._sklearn_add_random_state else None
            t_beta, t_br = SKLearnRegression(x, y,
                                             self._sklearn_model,
                                             random_state=seed,
                                             **self._sklearn_model_params).run()
            betas.append(t_beta)
            betas_resc.append(t_br)

        return betas, betas_resc
| [
"numpy.abs",
"numpy.repeat",
"inferelator.utils.Validator.argument_type",
"inferelator.utils.make_array_2d",
"inferelator.distributed.dask_functions.sklearn_regress_dask",
"inferelator.utils.Validator.argument_is_subclass",
"inferelator.regression.base_regression.recalculate_betas_from_selected",
"inf... | [((911, 945), 'inferelator.utils.Validator.argument_type', 'check.argument_type', (['x', 'np.ndarray'], {}), '(x, np.ndarray)\n', (930, 945), True, 'from inferelator.utils import Validator as check\n'), ((957, 991), 'inferelator.utils.Validator.argument_type', 'check.argument_type', (['y', 'np.ndarray'], {}), '(y, np.ndarray)\n', (976, 991), True, 'from inferelator.utils import Validator as check\n'), ((1003, 1051), 'inferelator.utils.Validator.argument_is_subclass', 'check.argument_is_subclass', (['model', 'BaseEstimator'], {}), '(model, BaseEstimator)\n', (1029, 1051), True, 'from inferelator.utils import Validator as check\n'), ((1693, 1715), 'inferelator.utils.make_array_2d', 'utils.make_array_2d', (['y'], {}), '(y)\n', (1712, 1715), False, 'from inferelator import utils\n'), ((1732, 1785), 'inferelator.regression.base_regression.recalculate_betas_from_selected', 'base_regression.recalculate_betas_from_selected', (['x', 'y'], {}), '(x, y)\n', (1779, 1785), False, 'from inferelator.regression import base_regression\n'), ((1807, 1859), 'inferelator.regression.base_regression.predict_error_reduction', 'base_regression.predict_error_reduction', (['x', 'y', 'betas'], {}), '(x, y, betas)\n', (1846, 1859), False, 'from inferelator.regression import base_regression\n'), ((2729, 2748), 'inferelator.distributed.inferelator_mp.MPControl.is_dask', 'MPControl.is_dask', ([], {}), '()\n', (2746, 2748), False, 'from inferelator.distributed.inferelator_mp import MPControl\n'), ((2853, 2941), 'inferelator.distributed.dask_functions.sklearn_regress_dask', 'sklearn_regress_dask', (['self.X', 'self.Y', 'self.model', 'self.G', 'self.genes', 'self.min_coef'], {}), '(self.X, self.Y, self.model, self.G, self.genes, self.\n min_coef)\n', (2873, 2941), False, 'from inferelator.distributed.dask_functions import sklearn_regress_dask\n'), ((1356, 1369), 'numpy.abs', 'np.abs', (['coefs'], {}), '(coefs)\n', (1362, 1369), True, 'import numpy as np\n'), ((2061, 2072), 'numpy.zeros', 
'np.zeros', (['K'], {}), '(K)\n', (2069, 2072), True, 'import numpy as np\n'), ((2105, 2116), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (2113, 2116), True, 'import numpy as np\n'), ((3049, 3121), 'inferelator.regression.base_regression.PROGRESS_STR.format', 'base_regression.PROGRESS_STR.format', ([], {'gn': 'self.genes[j]', 'i': 'j', 'total': 'self.G'}), '(gn=self.genes[j], i=j, total=self.G)\n', (3084, 3121), False, 'from inferelator.regression import base_regression\n'), ((3325, 3346), 'copy.copy', 'copy.copy', (['self.model'], {}), '(self.model)\n', (3334, 3346), False, 'import copy\n'), ((4493, 4515), 'inspect.isclass', 'inspect.isclass', (['model'], {}), '(model)\n', (4508, 4515), False, 'import inspect\n'), ((2006, 2024), 'numpy.repeat', 'np.repeat', (['(True)', 'K'], {}), '(True, K)\n', (2015, 2024), True, 'import numpy as np\n')] |
import matplotlib.image as mpimg
import os
import camera
import numpy as np
import cv2
import matplotlib.pyplot as plt
import config
import line
import time
import math
class ProcessImage():
def __init__(self, config):
self.config = config
self.left_line = line.Line(config)
self.right_line = line.Line(config)
self.ploty = np.linspace(0, config.shape[0]-1, config.shape[0])
self.perf= {'undistort':0,'binary':0,'warp':0,'find_lane':0,'fit_polynomial':0,'print':0}
self.total = 0
def color_binary(self, img):
img = np.copy(img)
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
#h_channel = hls[:,:,0]
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# taking h_channel into account, yellow's main band is
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= self.config.sx_thresh[0]) & (scaled_sobel <= self.config.sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= self.config.s_thresh[0]) & (s_channel <= self.config.s_thresh[1])] = 1
# Stack each channel
color_binary = (sxbinary + s_binary) * 255
return color_binary
def warp_image(self, img):
warped = cv2.warpPerspective(img, self.config.perspective_M, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
return warped
def find_lane_pixels(self, img):
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(img.shape[1]//2)
if (self.left_line.bestx is None or self.left_line.detected == False):
# Take a histogram of the bottom half of the image
histogram_left = np.sum(img[img.shape[0]//2:,:img.shape[1]//2], axis=0)
leftx_base = np.argmax(histogram_left)
margin_left = self.config.margin_default
else:
leftx_base = self.left_line.bestx[-self.config.y_per_frame]
# 4 is a heuristic number to narrow the search margin because we have confident on position based on previous value
margin_left = self.config.margin_default/4
if (self.right_line.bestx is None or self.right_line.detected == False):
histogram_right = np.sum(img[img.shape[0]//2:,img.shape[1]//2:], axis=0)
rightx_base = np.argmax(histogram_right) + midpoint
margin_right = self.config.margin_default
else:
rightx_base = self.right_line.bestx[-self.config.y_per_frame]
margin_right = self.config.margin_default/4
# Set height of windows - based on nwindows above and image shape
window_height = np.int(img.shape[0]//self.config.nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
window_boundary = []
# Step through the windows one by one
for window in range(self.config.nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - window*window_height
### Find the four below boundaries of the window ###
win_xleft_low = leftx_current - margin_left
win_xleft_high = leftx_current + margin_left
win_xright_low = rightx_current - margin_right
win_xright_high = rightx_current + margin_right
window_boundary.append((win_xleft_low, win_xleft_high, win_xright_low, win_xright_high, win_y_low, win_y_high))
### Identify the nonzero pixels in x and y within the window ###
good_left_inds = ((nonzerox > win_xleft_low) & (nonzerox < win_xleft_high) &
(nonzeroy > win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
good_right_inds = ((nonzerox > win_xright_low) & (nonzerox < win_xright_high) &
(nonzeroy > win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
### The adaptive margin shall be related to a reasonable range of road curvature
### The idea is that a road shall not curve too much. Therefore, if it cannot find
### the line in one window, we can assume it will not be too far away from the
### the range of curvature in the next window.
### If you found > minpix pixels, recenter next window ###
### (`right` or `leftx_current`) on their mean position ###
if len(good_left_inds) > self.config.minpix:
new_leftx = int(np.mean(nonzerox[good_left_inds]))
if (abs(new_leftx - leftx_current) < margin_left / 2):
leftx_current = new_leftx
margin_left = self.config.margin_default
else:
margin_left = margin_left + self.config.margin_default
else:
margin_left = margin_left + self.config.margin_default
if len(good_right_inds) > self.config.minpix:
new_rightx = int(np.mean(nonzerox[good_right_inds]))
if (abs(new_rightx - rightx_current) < margin_right / 2):
rightx_current = new_rightx
margin_right = self.config.margin_default
else:
margin_right = margin_right + self.config.margin_default;
else:
margin_right = margin_right + self.config.margin_default;
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
self.left_line.allx = nonzerox[left_lane_inds]
self.left_line.ally = nonzeroy[left_lane_inds]
self.right_line.allx = nonzerox[right_lane_inds]
self.right_line.ally = nonzeroy[right_lane_inds]
return np.int32(window_boundary)
def find_window_centroids(self, image):
window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(self.config.window_width) # Create our window template that we will use for convolutions
window_height = np.int(image.shape[0]//self.config.nwindows)
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
l_convolution = np.convolve(window,l_sum)
l_center = np.argmax(l_convolution)-self.config.window_width/2
r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
r_convolution = np.convolve(window,r_sum)
r_center = np.argmax(r_convolution)-self.config.window_width/2+int(image.shape[1]/2)
margin = self.config.margin_default
# Add what we found for the first layer
window_centroids.append((l_center,r_center))
# Go through each layer looking for max pixel locations
for level in range(1,(int)(image.shape[0]/window_height)):
# convolve the window into the vertical slice of the image
image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = self.config.window_width/2
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,image.shape[1]))
l_center = np.argmax(conv_signal[l_min_index:l_max_index])+l_min_index-offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,image.shape[1]))
r_center = np.argmax(conv_signal[r_min_index:r_max_index])+r_min_index-offset
# Add what we found for that layer
window_centroids.append((l_center,r_center))
return window_centroids
def printOverlay(self, undistort, warped):
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([self.left_line.bestx, self.ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([self.right_line.bestx, self.ploty])))])
# pts_left = np.array([np.transpose(np.vstack([self.left_line.recent_xfitted[-1], self.ploty]))])
# pts_right = np.array([np.flipud(np.transpose(np.vstack([self.right_line.recent_xfitted[-1], self.ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
offset = (self.left_line.line_base_pos + self.right_line.line_base_pos)/2
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, self.config.perspective_Minv, (self.config.shape[1], self.config.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undistort, 1, newwarp, 0.3, 0)
cv2.putText(result,'Radius of Curvature = ' + str((int)((self.left_line.radius_of_curvature + self.right_line.radius_of_curvature) / 2)) + "(m)", (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
placetext= "center"
if offset > 0:
placetext = "{0:.2f}".format(offset) +"m left of center"
elif offset < 0:
placetext = "{0:.2f}".format(-offset) +"m right of center"
cv2.putText(result,'Vehicle is ' + placetext, (10,60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
return result
def process_image_fit(self, undistort):
warped = self.process_image_warped(undistort)
start = time.time()
self.find_lane_pixels(warped)
self.perf['find_lane'] = self.perf['find_lane'] + (time.time() - start)
out_img = np.dstack((warped, warped, warped))
window_boundary = self.find_lane_pixels(warped)
# Step through the windows one by one
for window in window_boundary:
# Draw the windows on the visualization image
cv2.rectangle(out_img,(window[0],window[4]),
(window[1],window[5]),(0,255,0), 2)
cv2.rectangle(out_img,(window[2],window[4]),
(window[3],window[5]),(0,255,0), 2)
start = time.time()
self.left_line.fit_polynomial(self.ploty, warped.shape[1])
self.right_line.fit_polynomial(self.ploty, warped.shape[1])
self.perf['fit_polynomial'] = self.perf['fit_polynomial'] + (time.time() - start)
## Visualization ##
# Colors in the left and right lane regions and Plots the left and right polynomials on the lane lines
out_img[self.left_line.ally, self.left_line.allx] = [255, 0, 0]
out_img[self.right_line.ally, self.right_line.allx] = [0, 0, 255]
for j in range(len(self.left_line.recent_xfitted)):
for i in range(len(self.ploty)):
x = self.left_line.recent_xfitted[j][i];
if (x >= 0 and x < undistort.shape[1]):
out_img[np.int32(self.ploty[i]), np.int32(x)] = [0, 255, 0]
for j in range(len(self.right_line.recent_xfitted)):
for i in range(len(self.ploty)):
x = self.right_line.recent_xfitted[j][i];
if (x >= 0 and x < undistort.shape[1]):
out_img[np.int32(self.ploty[i]), np.int32(x)] = [0, 255, 0]
return out_img
def process_image_warped(self, undistort):
binary = self.process_image_binary(undistort)
start = time.time()
warped = self.warp_image(binary)
self.perf['warp'] = self.perf['warp'] + (time.time() - start)
return warped
def process_image_binary(self, undistort):
start = time.time()
binary = self.color_binary(undistort)
self.perf['binary'] = self.perf['binary'] + (time.time() - start)
return binary
def undistort(self, img):
start = time.time()
undistort = self.config.camera.undistort(img)
self.perf['undistort'] = self.perf['undistort'] + (time.time() - start)
return undistort
def process_image(self, undistort):
start = time.time()
binary = self.color_binary(undistort)
self.perf['binary'] = self.perf['binary'] + (time.time() - start)
start = time.time()
warped = self.warp_image(binary)
self.perf['warp'] = self.perf['warp'] + (time.time() - start)
start = time.time()
self.find_lane_pixels(warped)
self.perf['find_lane'] = self.perf['find_lane'] + (time.time() - start)
start = time.time()
self.left_line.fit_polynomial(self.ploty, warped.shape[1])
self.right_line.fit_polynomial(self.ploty, warped.shape[1])
self.perf['fit_polynomial'] = self.perf['fit_polynomial'] + (time.time() - start)
start = time.time()
output = self.printOverlay(undistort, warped)
self.perf['print'] = self.perf['print'] + (time.time() - start)
self.total = self.total + 1
return output
| [
"cv2.rectangle",
"numpy.convolve",
"numpy.hstack",
"numpy.int32",
"numpy.array",
"cv2.warpPerspective",
"numpy.mean",
"numpy.max",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"numpy.ones",
"numpy.argmax",
"cv2.putText",
"cv2.cvtColor",
"numpy.int_",
"n... | [((278, 295), 'line.Line', 'line.Line', (['config'], {}), '(config)\n', (287, 295), False, 'import line\n'), ((322, 339), 'line.Line', 'line.Line', (['config'], {}), '(config)\n', (331, 339), False, 'import line\n'), ((361, 413), 'numpy.linspace', 'np.linspace', (['(0)', '(config.shape[0] - 1)', 'config.shape[0]'], {}), '(0, config.shape[0] - 1, config.shape[0])\n', (372, 413), True, 'import numpy as np\n'), ((581, 593), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (588, 593), True, 'import numpy as np\n'), ((672, 708), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (684, 708), False, 'import cv2\n'), ((902, 940), 'cv2.Sobel', 'cv2.Sobel', (['l_channel', 'cv2.CV_64F', '(1)', '(0)'], {}), '(l_channel, cv2.CV_64F, 1, 0)\n', (911, 940), False, 'import cv2\n'), ((989, 1008), 'numpy.absolute', 'np.absolute', (['sobelx'], {}), '(sobelx)\n', (1000, 1008), True, 'import numpy as np\n'), ((1192, 1219), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobel'], {}), '(scaled_sobel)\n', (1205, 1219), True, 'import numpy as np\n'), ((1384, 1408), 'numpy.zeros_like', 'np.zeros_like', (['s_channel'], {}), '(s_channel)\n', (1397, 1408), True, 'import numpy as np\n'), ((1669, 1779), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'self.config.perspective_M', '(img.shape[1], img.shape[0])'], {'flags': 'cv2.INTER_LINEAR'}), '(img, self.config.perspective_M, (img.shape[1], img.\n shape[0]), flags=cv2.INTER_LINEAR)\n', (1688, 1779), False, 'import cv2\n'), ((1996, 2021), 'numpy.int', 'np.int', (['(img.shape[1] // 2)'], {}), '(img.shape[1] // 2)\n', (2002, 2021), True, 'import numpy as np\n'), ((3147, 3191), 'numpy.int', 'np.int', (['(img.shape[0] // self.config.nwindows)'], {}), '(img.shape[0] // self.config.nwindows)\n', (3153, 3191), True, 'import numpy as np\n'), ((3317, 3337), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (3325, 3337), True, 'import numpy as np\n'), ((3357, 3377), 
'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (3365, 3377), True, 'import numpy as np\n'), ((7076, 7101), 'numpy.int32', 'np.int32', (['window_boundary'], {}), '(window_boundary)\n', (7084, 7101), True, 'import numpy as np\n'), ((7255, 7288), 'numpy.ones', 'np.ones', (['self.config.window_width'], {}), '(self.config.window_width)\n', (7262, 7288), True, 'import numpy as np\n'), ((7376, 7422), 'numpy.int', 'np.int', (['(image.shape[0] // self.config.nwindows)'], {}), '(image.shape[0] // self.config.nwindows)\n', (7382, 7422), True, 'import numpy as np\n'), ((7818, 7844), 'numpy.convolve', 'np.convolve', (['window', 'l_sum'], {}), '(window, l_sum)\n', (7829, 7844), True, 'import numpy as np\n'), ((8026, 8052), 'numpy.convolve', 'np.convolve', (['window', 'r_sum'], {}), '(window, r_sum)\n', (8037, 8052), True, 'import numpy as np\n'), ((9748, 9792), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (9757, 9792), True, 'import numpy as np\n'), ((10302, 10334), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (10311, 10334), True, 'import numpy as np\n'), ((10643, 10755), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'self.config.perspective_Minv', '(self.config.shape[1], self.config.shape[0])'], {}), '(color_warp, self.config.perspective_Minv, (self.config.\n shape[1], self.config.shape[0]))\n', (10662, 10755), False, 'import cv2\n'), ((10821, 10867), 'cv2.addWeighted', 'cv2.addWeighted', (['undistort', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(undistort, 1, newwarp, 0.3, 0)\n', (10836, 10867), False, 'import cv2\n'), ((11302, 11412), 'cv2.putText', 'cv2.putText', (['result', "('Vehicle is ' + placetext)", '(10, 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 255, 255)', '(2)'], {}), "(result, 'Vehicle is ' + placetext, (10, 60), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n", (11313, 11412), False, 'import cv2\n'), 
((11541, 11552), 'time.time', 'time.time', ([], {}), '()\n', (11550, 11552), False, 'import time\n'), ((11689, 11724), 'numpy.dstack', 'np.dstack', (['(warped, warped, warped)'], {}), '((warped, warped, warped))\n', (11698, 11724), True, 'import numpy as np\n'), ((12150, 12161), 'time.time', 'time.time', ([], {}), '()\n', (12159, 12161), False, 'import time\n'), ((13415, 13426), 'time.time', 'time.time', ([], {}), '()\n', (13424, 13426), False, 'import time\n'), ((13624, 13635), 'time.time', 'time.time', ([], {}), '()\n', (13633, 13635), False, 'import time\n'), ((13825, 13836), 'time.time', 'time.time', ([], {}), '()\n', (13834, 13836), False, 'import time\n'), ((14053, 14064), 'time.time', 'time.time', ([], {}), '()\n', (14062, 14064), False, 'import time\n'), ((14201, 14212), 'time.time', 'time.time', ([], {}), '()\n', (14210, 14212), False, 'import time\n'), ((14340, 14351), 'time.time', 'time.time', ([], {}), '()\n', (14349, 14351), False, 'import time\n'), ((14486, 14497), 'time.time', 'time.time', ([], {}), '()\n', (14495, 14497), False, 'import time\n'), ((14739, 14750), 'time.time', 'time.time', ([], {}), '()\n', (14748, 14750), False, 'import time\n'), ((2191, 2250), 'numpy.sum', 'np.sum', (['img[img.shape[0] // 2:, :img.shape[1] // 2]'], {'axis': '(0)'}), '(img[img.shape[0] // 2:, :img.shape[1] // 2], axis=0)\n', (2197, 2250), True, 'import numpy as np\n'), ((2271, 2296), 'numpy.argmax', 'np.argmax', (['histogram_left'], {}), '(histogram_left)\n', (2280, 2296), True, 'import numpy as np\n'), ((2731, 2790), 'numpy.sum', 'np.sum', (['img[img.shape[0] // 2:, img.shape[1] // 2:]'], {'axis': '(0)'}), '(img[img.shape[0] // 2:, img.shape[1] // 2:], axis=0)\n', (2737, 2790), True, 'import numpy as np\n'), ((6576, 6606), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (6590, 6606), True, 'import numpy as np\n'), ((6637, 6668), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (6651, 
6668), True, 'import numpy as np\n'), ((7863, 7887), 'numpy.argmax', 'np.argmax', (['l_convolution'], {}), '(l_convolution)\n', (7872, 7887), True, 'import numpy as np\n'), ((8645, 8677), 'numpy.convolve', 'np.convolve', (['window', 'image_layer'], {}), '(window, image_layer)\n', (8656, 8677), True, 'import numpy as np\n'), ((10421, 10435), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (10428, 10435), True, 'import numpy as np\n'), ((11936, 12027), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(window[0], window[4])', '(window[1], window[5])', '(0, 255, 0)', '(2)'], {}), '(out_img, (window[0], window[4]), (window[1], window[5]), (0, \n 255, 0), 2)\n', (11949, 12027), False, 'import cv2\n'), ((12041, 12132), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(window[2], window[4])', '(window[3], window[5])', '(0, 255, 0)', '(2)'], {}), '(out_img, (window[2], window[4]), (window[3], window[5]), (0, \n 255, 0), 2)\n', (12054, 12132), False, 'import cv2\n'), ((1121, 1139), 'numpy.max', 'np.max', (['abs_sobelx'], {}), '(abs_sobelx)\n', (1127, 1139), True, 'import numpy as np\n'), ((2812, 2838), 'numpy.argmax', 'np.argmax', (['histogram_right'], {}), '(histogram_right)\n', (2821, 2838), True, 'import numpy as np\n'), ((8071, 8095), 'numpy.argmax', 'np.argmax', (['r_convolution'], {}), '(r_convolution)\n', (8080, 8095), True, 'import numpy as np\n'), ((9688, 9709), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (9701, 9709), True, 'import numpy as np\n'), ((11650, 11661), 'time.time', 'time.time', ([], {}), '()\n', (11659, 11661), False, 'import time\n'), ((12366, 12377), 'time.time', 'time.time', ([], {}), '()\n', (12375, 12377), False, 'import time\n'), ((13517, 13528), 'time.time', 'time.time', ([], {}), '()\n', (13526, 13528), False, 'import time\n'), ((13735, 13746), 'time.time', 'time.time', ([], {}), '()\n', (13744, 13746), False, 'import time\n'), ((13950, 13961), 'time.time', 'time.time', ([], {}), '()\n', (13959, 13961), False, 
'import time\n'), ((14164, 14175), 'time.time', 'time.time', ([], {}), '()\n', (14173, 14175), False, 'import time\n'), ((14303, 14314), 'time.time', 'time.time', ([], {}), '()\n', (14312, 14314), False, 'import time\n'), ((14449, 14460), 'time.time', 'time.time', ([], {}), '()\n', (14458, 14460), False, 'import time\n'), ((14702, 14713), 'time.time', 'time.time', ([], {}), '()\n', (14711, 14713), False, 'import time\n'), ((14856, 14867), 'time.time', 'time.time', ([], {}), '()\n', (14865, 14867), False, 'import time\n'), ((5543, 5576), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (5550, 5576), True, 'import numpy as np\n'), ((6034, 6068), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (6041, 6068), True, 'import numpy as np\n'), ((9078, 9125), 'numpy.argmax', 'np.argmax', (['conv_signal[l_min_index:l_max_index]'], {}), '(conv_signal[l_min_index:l_max_index])\n', (9087, 9125), True, 'import numpy as np\n'), ((9376, 9423), 'numpy.argmax', 'np.argmax', (['conv_signal[r_min_index:r_max_index]'], {}), '(conv_signal[r_min_index:r_max_index])\n', (9385, 9423), True, 'import numpy as np\n'), ((9910, 9955), 'numpy.vstack', 'np.vstack', (['[self.left_line.bestx, self.ploty]'], {}), '([self.left_line.bestx, self.ploty])\n', (9919, 9955), True, 'import numpy as np\n'), ((10012, 10058), 'numpy.vstack', 'np.vstack', (['[self.right_line.bestx, self.ploty]'], {}), '([self.right_line.bestx, self.ploty])\n', (10021, 10058), True, 'import numpy as np\n'), ((12922, 12945), 'numpy.int32', 'np.int32', (['self.ploty[i]'], {}), '(self.ploty[i])\n', (12930, 12945), True, 'import numpy as np\n'), ((12947, 12958), 'numpy.int32', 'np.int32', (['x'], {}), '(x)\n', (12955, 12958), True, 'import numpy as np\n'), ((13222, 13245), 'numpy.int32', 'np.int32', (['self.ploty[i]'], {}), '(self.ploty[i])\n', (13230, 13245), True, 'import numpy as np\n'), ((13247, 13258), 'numpy.int32', 'np.int32', (['x'], 
{}), '(x)\n', (13255, 13258), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Test module for level set transport
"""
from __future__ import print_function
from builtins import range
from builtins import object
from proteus.iproteus import *
import os
import numpy as np
import tables
from . import (ls_vortex_2d_p,
redist_vortex_2d_p,
vof_vortex_2d_p,
ls_consrv_vortex_2d_p,
ls_vortex_2d_n,
redist_vortex_2d_n,
vof_vortex_2d_n,
ls_consrv_vortex_2d_n,
ls_vortex_2d_so)
class TestVortex2D(object):
    """Regression test for 2D vortex level-set transport.

    Runs the coupled LS / redistancing / VOF / mass-conservation solve and
    compares the final fields against archived expected values.
    """

    @classmethod
    def setup_class(cls):
        pass

    @classmethod
    def teardown_class(cls):
        pass

    def setup_method(self, method):
        # Track archive names produced by a test so teardown can remove them.
        self.aux_names = []
        self.meshdir = os.path.dirname(os.path.abspath(__file__))
        self._scriptdir = os.path.dirname(os.path.abspath(__file__))

    def teardown_method(self, method):
        """Remove the .h5/.xmf archives and log file produced by the test."""
        filenames = []
        for aux_name in self.aux_names:
            filenames.extend([aux_name + '.' + ext for ext in ['h5', 'xmf']])
        filenames.append('proteus.log')
        for f in filenames:
            if os.path.exists(f):
                try:
                    os.remove(f)
                except OSError as e:
                    print("Error: %s - %s" % (e.filename, e.strerror))

    def test_vortex2D(self, use_strong_constraints=False):
        from proteus import default_s
        # BUG FIX: reload() is not a builtin on Python 3, which made this
        # line raise NameError; importlib.reload is the Python 3 spelling.
        import importlib
        if hasattr(importlib, "reload"):
            importlib.reload(default_s)
        else:
            reload(default_s)  # Python 2 builtin
        opts.logLevel = 7
        opts.verbose = True
        opts.profile = True
        opts.gatherArchive = True
        sList = []
        if ls_vortex_2d_so.sList == []:
            for i in range(len(ls_vortex_2d_so.pnList)):
                sList.append(default_s)
        else:
            sList = ls_vortex_2d_so.sList
        ns = NumericalSolution.NS_base(ls_vortex_2d_so,
                                       [ls_vortex_2d_p,
                                        redist_vortex_2d_p,
                                        vof_vortex_2d_p,
                                        ls_consrv_vortex_2d_p],
                                       [ls_vortex_2d_n,
                                        redist_vortex_2d_n,
                                        vof_vortex_2d_n,
                                        ls_consrv_vortex_2d_n],
                                       sList,
                                       opts)
        ns.calculateSolution(ls_vortex_2d_so.name)
        self.aux_names.append(ls_vortex_2d_so.name)

        # COMPARE VS SAVED FILES #
        expected_path = ls_vortex_2d_so.name + '_expected.h5'
        expected = tables.open_file(os.path.join(self._scriptdir, expected_path))
        actual = tables.open_file(ls_vortex_2d_so.name + '.h5', 'r')
        try:
            assert np.allclose(expected.root.u_t80,
                               actual.root.u_t80,
                               atol=1e-10)
            assert np.allclose(expected.root.phid_t80,
                               actual.root.phid_t80,
                               atol=1e-10)
            assert np.allclose(expected.root.vof_t80,
                               actual.root.vof_t80,
                               atol=1e-10)
        finally:
            # BUG FIX: close the HDF5 handles even when a comparison fails;
            # the original leaked both files on assertion failure.
            expected.close()
            actual.close()
        del ns
# No standalone entry point: the test above is intended to run under pytest.
if __name__ == '__main__':
    pass
| [
"os.path.exists",
"numpy.allclose",
"os.path.join",
"tables.open_file",
"os.path.abspath",
"os.remove"
] | [((2760, 2811), 'tables.open_file', 'tables.open_file', (["(ls_vortex_2d_so.name + '.h5')", '"""r"""'], {}), "(ls_vortex_2d_so.name + '.h5', 'r')\n", (2776, 2811), False, 'import tables\n'), ((2824, 2887), 'numpy.allclose', 'np.allclose', (['expected.root.u_t80', 'actual.root.u_t80'], {'atol': '(1e-10)'}), '(expected.root.u_t80, actual.root.u_t80, atol=1e-10)\n', (2835, 2887), True, 'import numpy as np\n'), ((2957, 3026), 'numpy.allclose', 'np.allclose', (['expected.root.phid_t80', 'actual.root.phid_t80'], {'atol': '(1e-10)'}), '(expected.root.phid_t80, actual.root.phid_t80, atol=1e-10)\n', (2968, 3026), True, 'import numpy as np\n'), ((3096, 3163), 'numpy.allclose', 'np.allclose', (['expected.root.vof_t80', 'actual.root.vof_t80'], {'atol': '(1e-10)'}), '(expected.root.vof_t80, actual.root.vof_t80, atol=1e-10)\n', (3107, 3163), True, 'import numpy as np\n'), ((786, 811), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (801, 811), False, 'import os\n'), ((855, 880), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (870, 880), False, 'import os\n'), ((1148, 1165), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (1162, 1165), False, 'import os\n'), ((2698, 2742), 'os.path.join', 'os.path.join', (['self._scriptdir', 'expected_path'], {}), '(self._scriptdir, expected_path)\n', (2710, 2742), False, 'import os\n'), ((1208, 1220), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1217, 1220), False, 'import os\n')] |
# web-app for API image manipulation
from flask import Flask, request, render_template, send_from_directory
import os
from PIL import Image
import tensorflow as tf
import cv2
import numpy as np
from model import generator_model
# Flask application instance; APP_ROOT is the absolute directory of this file,
# used to build filesystem paths for uploaded/served images.
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# default access page
@app.route("/")
def main():
    """Render the landing page (index.html)."""
    return render_template('index.html')
# upload selected image and forward to processing page
@app.route("/upload", methods=["POST"])
def upload():
    """Save an uploaded image and forward to the processing page.

    Accepts .jpg/.png/.bmp (case-insensitive); any other extension returns
    the error page with HTTP 400.
    """
    target = os.path.join(APP_ROOT, 'static/images/')
    # create image directory if not found
    if not os.path.isdir(target):
        os.mkdir(target)
    # retrieve file from html file-picker
    uploaded = request.files.getlist("file")[0]
    print("File name: {}".format(uploaded.filename))
    filename = uploaded.filename
    # file support verification -- lowercase first so ".JPG"/".PNG" are
    # accepted too (the original compared case-sensitively)
    ext = os.path.splitext(filename)[1].lower()
    if ext in (".jpg", ".png", ".bmp"):
        print("File accepted")
    else:
        return render_template("error.html", message="The selected file is not supported"), 400
    # save file (os.path.join avoids the double slash "/".join produced)
    destination = os.path.join(target, filename)
    print("File saved to:", destination)
    uploaded.save(destination)
    # forward to processing page
    return render_template("processing.html", image_name=filename)
# flip filename 'vertical' or 'horizontal'
@app.route("/colorize", methods=["POST"])
def colorize():
    """Colorize the requested image with the GAN generator and return it.

    The image is resized to 224x224, its first channel is fed to the model
    as grayscale input, and the predicted LAB output is converted to RGB.
    """
    filename = request.form['image']
    # open and process image
    target = os.path.join(APP_ROOT, 'static/images')
    destination = "/".join([target, filename])
    img = Image.open(destination)
    # Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # Image.LANCZOS is the identical resampling filter.
    img = img.resize((224, 224), Image.LANCZOS)
    img_array = np.array(img)
    if len(np.shape(img_array)) == 2:
        img_array = np.reshape(img_array, (224, 224, 1))
    # first channel is used as the grayscale input
    gray = img_array[:, :, 0]
    gray = np.reshape(gray, (1, 224, 224, 1))
    # Initialize the model and load the weights
    model = generator_model()
    model.load_weights("./static/model0.h5")
    # Normalize the gray image to [-1, 1] and make prediction
    maximum_img = np.max(gray)
    max_divided = maximum_img / 2
    gray = (gray - max_divided) / max_divided
    predicted_image_lab = model.predict(gray)
    predicted_image_lab = np.reshape(predicted_image_lab, (224, 224, 3))
    # De-normalize the predicted image and convert it to RGB
    img = 127.5 * predicted_image_lab + 127.5
    img = img.astype('uint8')
    img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
    img = Image.fromarray(img)
    # save and return image
    destination = "/".join([target, 'temp.png'])
    if os.path.isfile(destination):
        os.remove(destination)
    img.save(destination)
    return send_image('temp.png')
# blend filename with stock photo and alpha parameter
@app.route("/blend", methods=["POST"])
def blend():
    """Blend the requested image with the stock photo 'blend.jpg'.

    ``alpha`` (0-100, from the form) controls the mix: 0 keeps the original
    image, 100 keeps the stock photo.
    """
    # retrieve parameters from html form
    alpha = request.form['alpha']
    filename1 = request.form['image']
    # open images
    target = os.path.join(APP_ROOT, 'static/images')
    filename2 = 'blend.jpg'
    destination1 = "/".join([target, filename1])
    destination2 = "/".join([target, filename2])
    img1 = Image.open(destination1)
    img2 = Image.open(destination2)
    # resize images to max dimensions
    width = max(img1.size[0], img2.size[0])
    height = max(img1.size[1], img2.size[1])
    # Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # Image.LANCZOS is the identical resampling filter.
    img1 = img1.resize((width, height), Image.LANCZOS)
    img2 = img2.resize((width, height), Image.LANCZOS)
    # if image in gray scale, convert stock image to monochrome
    if len(img1.mode) < 3:
        img2 = img2.convert('L')
    # blend and show image
    img = Image.blend(img1, img2, float(alpha) / 100)
    # save and return image
    destination = "/".join([target, 'temp.png'])
    if os.path.isfile(destination):
        os.remove(destination)
    img.save(destination)
    return send_image('temp.png')
# retrieve file from 'static/images' directory
@app.route('/static/images/<filename>')
def send_image(filename):
    """Serve a previously saved image from the static/images directory."""
    return send_from_directory("static/images", filename)
# run the development server when invoked directly
if __name__ == "__main__":
    app.run()
| [
"flask.render_template",
"flask.Flask",
"numpy.array",
"os.remove",
"flask.send_from_directory",
"numpy.reshape",
"numpy.max",
"os.path.isdir",
"os.mkdir",
"os.path.splitext",
"os.path.isfile",
"cv2.cvtColor",
"numpy.shape",
"PIL.Image.fromarray",
"PIL.Image.open",
"flask.request.files... | [((235, 250), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (240, 250), False, 'from flask import Flask, request, render_template, send_from_directory\n'), ((279, 304), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (294, 304), False, 'import os\n'), ((369, 398), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (384, 398), False, 'from flask import Flask, request, render_template, send_from_directory\n'), ((523, 563), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""static/images/"""'], {}), "(APP_ROOT, 'static/images/')\n", (535, 563), False, 'import os\n'), ((1291, 1346), 'flask.render_template', 'render_template', (['"""processing.html"""'], {'image_name': 'filename'}), "('processing.html', image_name=filename)\n", (1306, 1346), False, 'from flask import Flask, request, render_template, send_from_directory\n'), ((1536, 1575), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""static/images"""'], {}), "(APP_ROOT, 'static/images')\n", (1548, 1575), False, 'import os\n'), ((1634, 1657), 'PIL.Image.open', 'Image.open', (['destination'], {}), '(destination)\n', (1644, 1657), False, 'from PIL import Image\n'), ((1722, 1735), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1730, 1735), True, 'import numpy as np\n'), ((1882, 1916), 'numpy.reshape', 'np.reshape', (['gray', '(1, 224, 224, 1)'], {}), '(gray, (1, 224, 224, 1))\n', (1892, 1916), True, 'import numpy as np\n'), ((1974, 1991), 'model.generator_model', 'generator_model', ([], {}), '()\n', (1989, 1991), False, 'from model import generator_model\n'), ((2111, 2123), 'numpy.max', 'np.max', (['gray'], {}), '(gray)\n', (2117, 2123), True, 'import numpy as np\n'), ((2270, 2316), 'numpy.reshape', 'np.reshape', (['predicted_image_lab', '(224, 224, 3)'], {}), '(predicted_image_lab, (224, 224, 3))\n', (2280, 2316), True, 'import numpy as np\n'), ((2447, 2483), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 
'cv2.COLOR_LAB2RGB'], {}), '(img, cv2.COLOR_LAB2RGB)\n', (2459, 2483), False, 'import cv2\n'), ((2494, 2514), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2509, 2514), False, 'from PIL import Image\n'), ((2604, 2631), 'os.path.isfile', 'os.path.isfile', (['destination'], {}), '(destination)\n', (2618, 2631), False, 'import os\n'), ((2985, 3024), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""static/images"""'], {}), "(APP_ROOT, 'static/images')\n", (2997, 3024), False, 'import os\n'), ((3163, 3187), 'PIL.Image.open', 'Image.open', (['destination1'], {}), '(destination1)\n', (3173, 3187), False, 'from PIL import Image\n'), ((3199, 3223), 'PIL.Image.open', 'Image.open', (['destination2'], {}), '(destination2)\n', (3209, 3223), False, 'from PIL import Image\n'), ((3758, 3785), 'os.path.isfile', 'os.path.isfile', (['destination'], {}), '(destination)\n', (3772, 3785), False, 'import os\n'), ((4005, 4051), 'flask.send_from_directory', 'send_from_directory', (['"""static/images"""', 'filename'], {}), "('static/images', filename)\n", (4024, 4051), False, 'from flask import Flask, request, render_template, send_from_directory\n'), ((618, 639), 'os.path.isdir', 'os.path.isdir', (['target'], {}), '(target)\n', (631, 639), False, 'import os\n'), ((649, 665), 'os.mkdir', 'os.mkdir', (['target'], {}), '(target)\n', (657, 665), False, 'import os\n'), ((722, 751), 'flask.request.files.getlist', 'request.files.getlist', (['"""file"""'], {}), "('file')\n", (743, 751), False, 'from flask import Flask, request, render_template, send_from_directory\n'), ((880, 906), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (896, 906), False, 'import os\n'), ((1804, 1840), 'numpy.reshape', 'np.reshape', (['img_array', '(224, 224, 1)'], {}), '(img_array, (224, 224, 1))\n', (1814, 1840), True, 'import numpy as np\n'), ((2641, 2663), 'os.remove', 'os.remove', (['destination'], {}), '(destination)\n', (2650, 2663), False, 'import os\n'), ((3795, 
3817), 'os.remove', 'os.remove', (['destination'], {}), '(destination)\n', (3804, 3817), False, 'import os\n'), ((1028, 1103), 'flask.render_template', 'render_template', (['"""error.html"""'], {'message': '"""The selected file is not supported"""'}), "('error.html', message='The selected file is not supported')\n", (1043, 1103), False, 'from flask import Flask, request, render_template, send_from_directory\n'), ((1757, 1776), 'numpy.shape', 'np.shape', (['img_array'], {}), '(img_array)\n', (1765, 1776), True, 'import numpy as np\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Optimize chains of single-qubit gates using Euler 1q decomposer"""
import logging
import numpy as np
from qiskit.quantum_info import Operator
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.quantum_info.synthesis import one_qubit_decompose
from qiskit.converters import circuit_to_dag
LOG = logging.getLogger(__name__)
class Optimize1qGatesDecomposition(TransformationPass):
    """Optimize chains of single-qubit gates by combining them into a single gate."""

    def __init__(self, basis=None):
        """Optimize1qGatesDecomposition initializer.

        Args:
            basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. For the effects
                of this pass, the basis is the set intersection between the `basis` parameter
                and the Euler basis.
        """
        super().__init__()
        if not basis:
            self.basis = None
        else:
            wanted = set(basis)
            # Keep one Euler decomposer per euler-basis whose gates are all
            # contained in the requested basis.
            self.basis = [
                one_qubit_decompose.OneQubitEulerDecomposer(euler_name)
                for euler_name, gate_names in one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES.items()
                if set(gate_names).issubset(wanted)
            ]

    def run(self, dag):
        """Run the Optimize1qGatesDecomposition pass on `dag`.

        Args:
            dag (DAGCircuit): the DAG to be optimized.

        Returns:
            DAGCircuit: the optimized DAG.
        """
        if not self.basis:
            LOG.info("Skipping pass because no basis is set")
            return dag
        identity = np.eye(2)
        for gate_run in dag.collect_1q_runs():
            # A lone gate cannot be combined; only strip it if it is an
            # explicit identity.
            if len(gate_run) <= 1:
                node = gate_run[0]
                params = node.op.params
                if len(params) > 0 and np.array_equal(node.op.to_matrix(), identity):
                    dag.remove_op_node(node)
                continue
            # Fold the whole run into a single operator.
            combined = Operator(gate_run[0].op)
            for node in gate_run[1:]:
                combined = combined.compose(node.op)
            # Resynthesize with every decomposer and keep the shortest circuit.
            candidates = [decomposer(combined) for decomposer in self.basis]
            if candidates:
                best = min(candidates, key=len)
                if len(gate_run) > len(best):
                    dag.substitute_node_with_dag(gate_run[0], circuit_to_dag(best))
                    # Delete the other nodes in the run
                    for leftover in gate_run[1:]:
                        dag.remove_op_node(leftover)
        return dag
| [
"logging.getLogger",
"qiskit.quantum_info.synthesis.one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES.items",
"numpy.eye",
"qiskit.quantum_info.Operator",
"qiskit.converters.circuit_to_dag",
"qiskit.quantum_info.synthesis.one_qubit_decompose.OneQubitEulerDecomposer"
] | [((806, 833), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (823, 833), False, 'import logging\n'), ((2099, 2108), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2105, 2108), True, 'import numpy as np\n'), ((1490, 1545), 'qiskit.quantum_info.synthesis.one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES.items', 'one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES.items', ([], {}), '()\n', (1543, 1545), False, 'from qiskit.quantum_info.synthesis import one_qubit_decompose\n'), ((2578, 2597), 'qiskit.quantum_info.Operator', 'Operator', (['run[0].op'], {}), '(run[0].op)\n', (2586, 2597), False, 'from qiskit.quantum_info import Operator\n'), ((2933, 2957), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['new_circ'], {}), '(new_circ)\n', (2947, 2957), False, 'from qiskit.converters import circuit_to_dag\n'), ((1636, 1691), 'qiskit.quantum_info.synthesis.one_qubit_decompose.OneQubitEulerDecomposer', 'one_qubit_decompose.OneQubitEulerDecomposer', (['basis_name'], {}), '(basis_name)\n', (1679, 1691), False, 'from qiskit.quantum_info.synthesis import one_qubit_decompose\n')] |
import time
import cv2
import imutils
import numpy as np
import pyautogui
from imutils.video import WebcamVideoStream
from .finger_tracking import HandDetector
class FingerDetector:
    """Virtual mouse driven by hand gestures from a webcam.

    Index finger alone moves the cursor; index + middle finger pinched
    together (<40 px) performs a left click; an open palm sets ``clear``.
    """

    def __init__(self, cam, smooth=9):
        """
        Args:
            cam: frame source exposing a ``read()`` method
                (e.g. ``imutils.video.WebcamVideoStream``).
            smooth (int): smoothing divisor; larger values damp cursor jitter.
        """
        self.cam = cam
        self.width = 640
        self.height = 480
        self.screen_size = pyautogui.size()
        self.detector = HandDetector(detectionCon=0.8)
        # current and previous smoothed cursor positions
        self.clocX = 0
        self.clocY = 0
        self.plocX = 0
        self.plocY = 0
        self.smooth = smooth
        self.noFinger = False
        self.clear = False

    def get_finger(self):
        """Process one frame: detect the hand and act on finger gestures.

        Returns:
            The (possibly annotated) frame.
        """
        img = self.cam.read()
        img = imutils.resize(img, width=self.width, height=self.height)
        hands, img = self.detector.detect_hands(img)
        if hands:
            hand = hands[-1]
            landmarks = hand["marks"]  # landmarks
            fingers = self.detector.find_fingers(hand)
            if len(landmarks) != 0:
                posx, posy = landmarks[8]     # index fingertip
                posx1, posy1 = landmarks[12]  # middle fingertip
                self.noFinger = False
                # an open palm (all five fingers up) signals "clear"
                self.clear = sum(fingers) == 5
                if fingers[1] == 1 and fingers[2] == 0:
                    # index finger only: move the cursor
                    img = self.fingers_move(img, posx, posy)
                elif fingers[1] == 1 and fingers[2] == 1:
                    # index + middle finger: click gesture
                    # (delegates to fingers_click instead of duplicating it)
                    img = self.fingers_click(img, (posx, posy), (posx1, posy1))
            else:
                self.noFinger = True
        else:
            # BUG FIX: the original also called
            # ``self.fingers_move(img, posx, posy)`` here, but posx/posy are
            # never bound when no hand is detected, raising NameError.
            self.noFinger = True
        return img

    def fingers_move(self, img, posx, posy, offset=0):
        """Smoothly move the OS cursor to the screen point matching (posx, posy)."""
        mousex = np.interp(posx, (0, self.width), (0, self.screen_size[0]))
        mousey = np.interp(posy, (0, self.height), (0, self.screen_size[1]))
        # exponential smoothing toward the new target position
        self.clocX = self.plocX + (mousex - self.plocX) / self.smooth
        self.clocY = self.plocY + (mousey - self.plocY) / self.smooth
        if mousex < self.screen_size[0] and mousey < self.screen_size[1]:
            # x is mirrored so on-screen motion matches the camera view
            if offset == 0:
                pyautogui.moveTo(self.screen_size[0] - self.clocX, self.clocY, _pause=False)
            else:
                pyautogui.moveTo(self.screen_size[0] - self.clocX, self.clocY - offset, _pause=False)
        cv2.circle(img, (posx, posy), 10, (255, 0, 0), cv2.FILLED)
        self.plocX, self.plocY = self.clocX, self.clocY
        return img

    def fingers_click(self, img, finger1, finger2):
        """Left-click when the two fingertip points are pinched together (<40 px)."""
        (posx, posy) = finger1
        (posx1, posy1) = finger2
        length, info, img = self.detector.compute_dist((posx, posy), (posx1, posy1), img)
        if length < 40:
            cv2.circle(img, (info[4], info[5]), 10, (0, 255, 0), cv2.FILLED)
            pyautogui.click(button="left")
        return img

    def show(self, img):
        """Display the frame in a fixed window near the top of the screen."""
        cv2.namedWindow("Personal Note Writer")  # Create a named window
        cv2.moveWindow("Personal Note Writer", self.screen_size[0] // 3, 0)
        cv2.imshow("Personal Note Writer", img)
        cv2.waitKey(1)

    def run(self):
        """Main loop: process frames forever, overlaying the measured FPS."""
        start = time.time()
        while True:
            img = self.get_finger()
            end = time.time()
            fps = 1 / (end - start)
            img = cv2.flip(img, 1)
            cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
            self.show(img)
            start = end
def main():
    """Start the webcam stream and run the finger-driven mouse controller."""
    stream = WebcamVideoStream(src=0).start()
    FingerDetector(stream).run()


if __name__ == '__main__':
    main()
| [
"cv2.moveWindow",
"imutils.video.WebcamVideoStream",
"cv2.flip",
"pyautogui.moveTo",
"pyautogui.size",
"cv2.imshow",
"pyautogui.click",
"imutils.resize",
"cv2.circle",
"numpy.interp",
"time.time",
"cv2.waitKey",
"cv2.namedWindow"
] | [((326, 342), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (340, 342), False, 'import pyautogui\n'), ((647, 704), 'imutils.resize', 'imutils.resize', (['img'], {'width': 'self.width', 'height': 'self.height'}), '(img, width=self.width, height=self.height)\n', (661, 704), False, 'import imutils\n'), ((1916, 1974), 'numpy.interp', 'np.interp', (['posx', '(0, self.width)', '(0, self.screen_size[0])'], {}), '(posx, (0, self.width), (0, self.screen_size[0]))\n', (1925, 1974), True, 'import numpy as np\n'), ((1992, 2051), 'numpy.interp', 'np.interp', (['posy', '(0, self.height)', '(0, self.screen_size[1])'], {}), '(posy, (0, self.height), (0, self.screen_size[1]))\n', (2001, 2051), True, 'import numpy as np\n'), ((3063, 3102), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Personal Note Writer"""'], {}), "('Personal Note Writer')\n", (3078, 3102), False, 'import cv2\n'), ((3136, 3203), 'cv2.moveWindow', 'cv2.moveWindow', (['"""Personal Note Writer"""', '(self.screen_size[0] // 3)', '(0)'], {}), "('Personal Note Writer', self.screen_size[0] // 3, 0)\n", (3150, 3203), False, 'import cv2\n'), ((3212, 3251), 'cv2.imshow', 'cv2.imshow', (['"""Personal Note Writer"""', 'img'], {}), "('Personal Note Writer', img)\n", (3222, 3251), False, 'import cv2\n'), ((3260, 3274), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3271, 3274), False, 'import cv2\n'), ((3311, 3322), 'time.time', 'time.time', ([], {}), '()\n', (3320, 3322), False, 'import time\n'), ((2521, 2579), 'cv2.circle', 'cv2.circle', (['img', '(posx, posy)', '(10)', '(255, 0, 0)', 'cv2.FILLED'], {}), '(img, (posx, posy), 10, (255, 0, 0), cv2.FILLED)\n', (2531, 2579), False, 'import cv2\n'), ((2902, 2966), 'cv2.circle', 'cv2.circle', (['img', '(info[4], info[5])', '(10)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (info[4], info[5]), 10, (0, 255, 0), cv2.FILLED)\n', (2912, 2966), False, 'import cv2\n'), ((2979, 3009), 'pyautogui.click', 'pyautogui.click', ([], {'button': '"""left"""'}), 
"(button='left')\n", (2994, 3009), False, 'import pyautogui\n'), ((3397, 3408), 'time.time', 'time.time', ([], {}), '()\n', (3406, 3408), False, 'import time\n'), ((3463, 3479), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (3471, 3479), False, 'import cv2\n'), ((3652, 3676), 'imutils.video.WebcamVideoStream', 'WebcamVideoStream', ([], {'src': '(0)'}), '(src=0)\n', (3669, 3676), False, 'from imutils.video import WebcamVideoStream\n'), ((2312, 2388), 'pyautogui.moveTo', 'pyautogui.moveTo', (['(self.screen_size[0] - self.clocX)', 'self.clocY'], {'_pause': '(False)'}), '(self.screen_size[0] - self.clocX, self.clocY, _pause=False)\n', (2328, 2388), False, 'import pyautogui\n'), ((2423, 2512), 'pyautogui.moveTo', 'pyautogui.moveTo', (['(self.screen_size[0] - self.clocX)', '(self.clocY - offset)'], {'_pause': '(False)'}), '(self.screen_size[0] - self.clocX, self.clocY - offset,\n _pause=False)\n', (2439, 2512), False, 'import pyautogui\n'), ((1567, 1631), 'cv2.circle', 'cv2.circle', (['img', '(info[4], info[5])', '(10)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (info[4], info[5]), 10, (0, 255, 0), cv2.FILLED)\n', (1577, 1631), False, 'import cv2\n'), ((1656, 1686), 'pyautogui.click', 'pyautogui.click', ([], {'button': '"""left"""'}), "(button='left')\n", (1671, 1686), False, 'import pyautogui\n')] |
from matplotlib import pyplot as plt
import numpy as np
import os
if __name__ == '__main__':
    # Collect (core-count, runtime) pairs from every output/<name>_<cores> dir
    cores = []
    times = []
    for d in os.listdir("./output/"):
        print(d)
        if os.path.isdir("./output/" + d):
            cores.append(int(d.split("_")[1]))
            with open("./output/{}/results.txt".format(d), 'r') as fout:
                lines = fout.readlines()
                # runtime is the second-to-last whitespace token on the
                # second-to-last line of results.txt
                times.append(float(lines[-2].split(" ")[-2]))
    np.save("./times.npy", times)
    np.save("./cores.npy", cores)
    # BUG FIX: os.listdir order is arbitrary, so times[0] was not
    # necessarily the smallest-core baseline run; sort by core count first.
    order = np.argsort(cores)
    cores = np.asarray(cores)[order]
    times = np.asarray(times)[order]
    fig, ax = plt.subplots()
    ax.scatter(cores, times[0] / times, color='b', marker='s', edgecolor='k',
               label="Actual Speedup")
    ax.plot([0, max(cores)], [0, max(cores)], linestyle='--',
            label="Theoretical Speedup", color='#888888')
    # the y-axis shows the (dimensionless) speedup ratio, not raw seconds
    ax.set(xlabel="Number of Cores", ylabel="Speedup")
    # the labels above are only visible with a legend
    ax.legend()
    plt.savefig("./speedup.png")
    plt.show()
| [
"os.listdir",
"matplotlib.pyplot.savefig",
"numpy.array",
"os.path.isdir",
"matplotlib.pyplot.subplots",
"numpy.save",
"matplotlib.pyplot.show"
] | [((136, 159), 'os.listdir', 'os.listdir', (['"""./output/"""'], {}), "('./output/')\n", (146, 159), False, 'import os\n'), ((467, 495), 'numpy.save', 'np.save', (['"""./times.npy"""', 'time'], {}), "('./times.npy', time)\n", (474, 495), True, 'import numpy as np\n'), ((500, 529), 'numpy.save', 'np.save', (['"""./cores.npy"""', 'cores'], {}), "('./cores.npy', cores)\n", (507, 529), True, 'import numpy as np\n'), ((558, 572), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (566, 572), True, 'import numpy as np\n'), ((587, 601), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (599, 601), True, 'from matplotlib import pyplot as plt\n'), ((879, 907), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./speedup.png"""'], {}), "('./speedup.png')\n", (890, 907), True, 'from matplotlib import pyplot as plt\n'), ((912, 922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (920, 922), True, 'from matplotlib import pyplot as plt\n'), ((189, 219), 'os.path.isdir', 'os.path.isdir', (["('./output/' + d)"], {}), "('./output/' + d)\n", (202, 219), False, 'import os\n')] |
from snu.snu import Vector
from snu.snu import Twist
from snu.snu import Wrench
from snu.snu import Quaternion
import numpy as np
import pytest
def test_vector():
    """Element-wise checks on Vector construction and arithmetic.

    The original assertions used ``np.all([a, b])``, which stacks the two
    operands into one array and only checks that every element is non-zero;
    it never compared them.  ``np.array_equal`` / ``np.allclose`` perform
    the real element-wise comparison.
    """
    v = Vector(1., 2., 3.)
    assert np.array_equal(v, [1, 2, 3])
    assert np.array_equal(v.to_tuple(), (1, 2, 3))
    assert isinstance(v.to_tuple(), tuple)
    assert v == v
    for i, val in enumerate([1, 2, 3]):
        assert v[i] == val
    vv = Vector(4, 5, 6)
    m = vv - v
    assert np.array_equal(m, [3, 3, 3])
    m = v - vv
    assert np.array_equal(m, [-3, -3, -3])
    m = .1 * vv
    # scalar multiplication produces floats; compare with a tolerance
    assert np.allclose(m, [.4, .5, .6])
    assert np.array_equal(vv, [4, 5, 6])
def test_twist():
    """A Twist default-constructs and prints without raising."""
    twist = Twist()
    print(twist)
    assert True
def test_quaternion():
    """A default Quaternion is the identity rotation (w == 1)."""
    quat = Quaternion()
    assert quat.w == 1.0
| [
"snu.snu.Quaternion",
"numpy.all",
"snu.snu.Vector",
"snu.snu.Twist"
] | [((173, 194), 'snu.snu.Vector', 'Vector', (['(1.0)', '(2.0)', '(3.0)'], {}), '(1.0, 2.0, 3.0)\n', (179, 194), False, 'from snu.snu import Vector\n'), ((249, 271), 'numpy.all', 'np.all', (['[v, [1, 2, 3]]'], {}), '([v, [1, 2, 3]])\n', (255, 271), True, 'import numpy as np\n'), ((539, 554), 'snu.snu.Vector', 'Vector', (['(4)', '(5)', '(6)'], {}), '(4, 5, 6)\n', (545, 554), False, 'from snu.snu import Vector\n'), ((625, 647), 'numpy.all', 'np.all', (['[m, [3, 3, 3]]'], {}), '([m, [3, 3, 3]])\n', (631, 647), True, 'import numpy as np\n'), ((722, 747), 'numpy.all', 'np.all', (['[m, [-3, -3, -3]]'], {}), '([m, [-3, -3, -3]])\n', (728, 747), True, 'import numpy as np\n'), ((820, 848), 'numpy.all', 'np.all', (['[m, [0.4, 0.5, 0.6]]'], {}), '([m, [0.4, 0.5, 0.6]])\n', (826, 848), True, 'import numpy as np\n'), ((907, 930), 'numpy.all', 'np.all', (['[vv, [4, 5, 6]]'], {}), '([vv, [4, 5, 6]])\n', (913, 930), True, 'import numpy as np\n'), ((956, 963), 'snu.snu.Twist', 'Twist', ([], {}), '()\n', (961, 963), False, 'from snu.snu import Twist\n'), ((1025, 1037), 'snu.snu.Quaternion', 'Quaternion', ([], {}), '()\n', (1035, 1037), False, 'from snu.snu import Quaternion\n')] |
# -*- coding: utf-8 -*-
import logging
import utool as ut
import numpy as np # NOQA
(print, rrr, profile) = ut.inject2(__name__, '[_wbia_object]')
logger = logging.getLogger('wbia')
def _find_wbia_attrs(ibs, objname, blacklist=()):
    r"""
    Developer function to help figure out what attributes are available

    Args:
        ibs (wbia.IBEISController): images analysis api
        objname (str): table prefix, e.g. 'annot' matches get_annot_* methods
        blacklist (sequence): substrings; matching method names are dropped

    Returns:
        tuple: (matched_getters, matched_setters) attribute-name lists

    CommandLine:
        python -m wbia.images _find_wbia_attrs

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia._wbia_object import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> objname = 'images'
        >>> blacklist = []
        >>> _find_wbia_attrs(ibs, objname, blacklist)

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia._wbia_object import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> objname = 'annot'
        >>> blacklist = ['annot_pair']
        >>> _find_wbia_attrs(ibs, objname, blacklist)
    """
    import re

    def _matched_attrs(prefix):
        # Find ibs methods starting with `prefix`, drop blacklisted names,
        # and strip the prefix to recover the bare attribute name.
        found = ut.search_module(ibs, prefix)
        for stopword in blacklist:
            found = [fn for fn in found if stopword not in fn]
        pat = prefix + ut.named_field('attr', '.*')
        return [re.match(pat, fn).groupdict()['attr'] for fn in found]

    # The getter and setter matching logic was duplicated verbatim;
    # both now share _matched_attrs.
    matched_getters = _matched_attrs('get_' + objname + '_')
    matched_setters = _matched_attrs('set_' + objname + '_')
    return matched_getters, matched_setters
def _inject_getter_attrs(
    metaself,
    objname,
    attrs,
    configurable_attrs,
    depc_name=None,
    depcache_attrs=None,
    settable_attrs=None,
    aliased_attrs=None,
):
    """
    Used by the metaclass to inject methods and properties into the class
    inheriting from ObjectList1D.

    Args:
        metaself: the class being constructed by the metaclass.
        objname (str): ibs table prefix, e.g. 'annot' -> get_annot_<attr>.
        attrs (list): plain attributes backed by ibs getters/setters.
        configurable_attrs (list): attributes whose ibs getter takes config2_.
        depc_name (str): name of the depcache attribute on ibs, if any.
        depcache_attrs (list): (table, column) pairs fetched via the depcache.
        settable_attrs (list): subset of ``attrs`` that also receive setters.
        aliased_attrs (dict): alias -> canonical attribute name mapping.
    """
    if settable_attrs is None:
        settable_attrs = []
    settable_attrs = set(settable_attrs)
    # Inform the class of which variables will be injected
    metaself._settable_attrs = settable_attrs
    metaself._attrs = attrs
    metaself._configurable_attrs = configurable_attrs
    if depcache_attrs is None:
        metaself._depcache_attrs = []
    else:
        metaself._depcache_attrs = ['%s_%s' % (tbl, col) for tbl, col in depcache_attrs]
    if aliased_attrs is not None:
        metaself._attrs_aliases = aliased_attrs
    else:
        metaself._attrs_aliases = {}

    attr_to_aliases = ut.invert_dict(metaself._attrs_aliases, unique_vals=False)

    # What is difference between configurable and depcache getters?
    # Could depcache getters just be made configurable?
    # I guess its just an efficincy thing. Actually its config2_-vs-config
    # FIXME: rectify differences between normal / configurable / depcache
    # getter

    def _make_caching_setter(attrname, _rowid_setter):
        # Wrap a rowid-based setter so values are mirrored in the object's
        # internal cache when caching is enabled (or when there is no ibs).
        def _setter(self, values, *args, **kwargs):
            if self._ibs is None:
                self._internal_attrs[attrname] = values
            else:
                if self._caching and attrname in self._internal_attrs:
                    self._internal_attrs[attrname] = values
                _rowid_setter(self, self._rowids, values)

        ut.set_funcname(_setter, '_set_' + attrname)
        return _setter

    def _make_caching_getter(attrname, _rowid_getter):
        # Wrap a rowid-based getter with the internal attribute cache.
        def _getter(self):
            if self._ibs is None or (self._caching and attrname in self._internal_attrs):
                data = self._internal_attrs[attrname]
            else:
                data = _rowid_getter(self, self._rowids)
                if self._caching:
                    self._internal_attrs[attrname] = data
            return data

        ut.set_funcname(_getter, '_get_' + attrname)
        return _getter

    # make default version use implicit rowids and another
    # that takes explicit rowids.

    def _make_setters(objname, attrname):
        ibs_funcname = 'set_%s_%s' % (objname, attrname)

        def _rowid_setter(self, rowids, values, *args, **kwargs):
            ibs_callable = getattr(self._ibs, ibs_funcname)
            ibs_callable(rowids, values, *args, **kwargs)

        ut.set_funcname(_rowid_setter, '_rowid_set_' + attrname)
        _setter = _make_caching_setter(attrname, _rowid_setter)
        return _rowid_setter, _setter

    # ---

    def _make_getters(objname, attrname):
        ibs_funcname = 'get_%s_%s' % (objname, attrname)

        def _rowid_getter(self, rowids):
            ibs_callable = getattr(self._ibs, ibs_funcname)
            data = ibs_callable(rowids)
            if self._asarray:
                data = np.array(data)
            return data

        ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname)
        _getter = _make_caching_getter(attrname, _rowid_getter)
        return _rowid_getter, _getter

    def _make_cfg_getters(objname, attrname):
        # Same as _make_getters, but forwards the object's config2_.
        ibs_funcname = 'get_%s_%s' % (objname, attrname)

        def _rowid_getter(self, rowids):
            ibs_callable = getattr(self._ibs, ibs_funcname)
            data = ibs_callable(rowids, config2_=self._config)
            if self._asarray:
                data = np.array(data)
            return data

        ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname)
        _getter = _make_caching_getter(attrname, _rowid_getter)
        return _rowid_getter, _getter

    def _make_depc_getters(depc_name, attrname, tbl, col):
        # Getter that pulls a column through the dependency cache.
        def _rowid_getter(self, rowids):
            depc = getattr(self._ibs, depc_name)
            data = depc.get(tbl, rowids, col, config=self._config)
            if self._asarray:
                data = np.array(data)
            return data

        ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname)
        _getter = _make_caching_getter(attrname, _rowid_getter)
        return _rowid_getter, _getter

    # Collect setter / getter functions and properties
    rowid_getters = []
    getters = []
    setters = []
    properties = []
    for attrname in attrs:
        _rowid_getter, _getter = _make_getters(objname, attrname)
        if attrname in settable_attrs:
            _rowid_setter, _setter = _make_setters(objname, attrname)
            setters.append(_setter)
        else:
            _setter = None
        prop = property(fget=_getter, fset=_setter)
        rowid_getters.append((attrname, _rowid_getter))
        getters.append(_getter)
        properties.append((attrname, prop))

    for attrname in configurable_attrs:
        _rowid_getter, _getter = _make_cfg_getters(objname, attrname)
        prop = property(fget=_getter)
        rowid_getters.append((attrname, _rowid_getter))
        getters.append(_getter)
        properties.append((attrname, prop))

    if depcache_attrs is not None:
        for tbl, col in depcache_attrs:
            attrname = '%s_%s' % (tbl, col)
            _rowid_getter, _getter = _make_depc_getters(depc_name, attrname, tbl, col)
            prop = property(fget=_getter, fset=None)
            rowid_getters.append((attrname, _rowid_getter))
            getters.append(_getter)
            properties.append((attrname, prop))

    aliases = []

    # Inject all gathered information
    for attrname, func in rowid_getters:
        funcname = ut.get_funcname(func)
        setattr(metaself, funcname, func)
        # ensure aliases have rowid getters
        for alias in attr_to_aliases.get(attrname, []):
            alias_funcname = '_rowid_get_' + alias
            setattr(metaself, alias_funcname, func)

    for func in getters:
        funcname = ut.get_funcname(func)
        setattr(metaself, funcname, func)

    for func in setters:
        funcname = ut.get_funcname(func)
        setattr(metaself, funcname, func)

    for attrname, prop in properties:
        setattr(metaself, attrname, prop)
        for alias in attr_to_aliases.pop(attrname, []):
            aliases.append((alias, attrname))
            setattr(metaself, alias, prop)

    if ut.get_argflag('--autogen-core'):
        # TODO: turn on autogeneration given a flag

        def expand_closure_source(funcname, func):
            source = ut.get_func_sourcecode(func)
            # BUG FIX: the original read the Python-2-only attributes
            # ``func_code``/``func_closure``; Python 3 spells them
            # ``__code__``/``__closure__``.
            closure_vars = [
                (k, v.cell_contents)
                for k, v in zip(func.__code__.co_freevars, func.__closure__)
            ]
            source = ut.unindent(source)
            import re

            for k, v in closure_vars:
                source = re.sub('\\b' + k + '\\b', ut.repr2(v), source)
            source = re.sub(r'def .*\(self', 'def ' + funcname + '(self', source)
            source = ut.indent(source.strip(), '    ') + '\n'
            return source

        explicit_lines = []
        # build explicit version for jedi?
        # BUG FIX: ``getters``/``setters`` hold bare functions, not
        # (name, func) pairs, so the original tuple-unpacking loops raised
        # TypeError here; recover the name with ut.get_funcname instead.
        for func in getters:
            funcname = ut.get_funcname(func)
            source = expand_closure_source(funcname, func)
            explicit_lines.append(source)

        for func in setters:
            funcname = ut.get_funcname(func)
            source = expand_closure_source(funcname, func)
            explicit_lines.append(source)

        for attrname, prop in properties:
            getter_name = None if prop.fget is None else ut.get_funcname(prop.fget)
            setter_name = None if prop.fset is None else ut.get_funcname(prop.fset)
            source = '    %s = property(%s, %s)' % (attrname, getter_name, setter_name)
            explicit_lines.append(source)

        for alias, attrname in aliases:
            source = '    %s = %s' % (alias, attrname)
            explicit_lines.append(source)

        explicit_source = (
            '\n'.join(
                [
                    'from wbia import _wbia_object',
                    '',
                    '',
                    'class _%s_base_class(_wbia_object.ObjectList1D):',
                    '    __needs_inject__ = False',
                    '',
                ]
            )
            % (objname,)
        )
        explicit_source += '\n'.join(explicit_lines)
        explicit_fname = '_autogen_%s_base.py' % (objname,)
        from os.path import dirname, join

        ut.writeto(join(dirname(__file__), explicit_fname), explicit_source + '\n')

    if attr_to_aliases:
        raise AssertionError('Unmapped aliases %r' % (attr_to_aliases,))
class ObjectScalar0D(ut.NiceRepr, ut.HashComparable2):
    """
    Scalar view over a length-1 ObjectList1D.

    Attribute access is forwarded to the wrapped vector object and the
    single element of each vectorized result is unwrapped, so callers can
    treat this as a plain scalar object.
    """

    def __init__(self, obj1d):
        # Only singleton vectors can be viewed as scalars.
        assert len(obj1d) == 1
        self.obj1d = obj1d

    def __nice__(self):
        return 'rowid=%s, uuid=%s' % (self._rowids, self.uuids)

    def __getattr__(self, key):
        vals = getattr(self.obj1d, key)
        # `show` stays callable on the whole vector; every other attribute
        # is a length-1 vector whose single element we unwrap.
        return vals if key == 'show' else vals[0]

    def __dir__(self):
        return (
            dir(object)
            + list(self.__class__.__dict__.keys())
            + self.obj1d.__vector_attributes__()
        )

    def _make_lazy_dict(self):
        """
        Build a LazyDict that defers each vector-attribute lookup until the
        value is actually requested.

        Example:
            >>> # DISABLE_DOCTEST
            >>> import wbia
            >>> ibs = wbia.opendb('testdb1')
            >>> scalar = ibs.annots()[0]
            >>> metadata = scalar._make_lazy_dict()
            >>> print('aid = %r' % (metadata['aid'],))
        """
        lazy = ut.LazyDict()
        for attrname in self.obj1d.__vector_attributes__():
            lazy[attrname] = ut.partial(getattr, self, attrname)
        return lazy
# @ut.reloadable_class
class ObjectList1D(ut.NiceRepr, ut.HashComparable2):
    """
    An object that efficiently operates on a list of wbia objects using
    vectorized code. Single instances can be returned as ObjectScalar0D's
    """

    def __init__(self, rowids, ibs, config=None, caching=False, asarray=False):
        """
        Args:
            rowids: database row ids backing this object list
            ibs: controller used by injected getters for lazy lookups
            config: optional configuration forwarded to configurable getters
            caching (bool): if True, fetched attribute values may be cached
            asarray (bool): presumably makes injected getters return arrays
                instead of lists — TODO confirm against the injection code
        """
        self._rowids = rowids
        # self._islist = True
        # Internal cache
        self._internal_attrs = {}
        # Internal behaviors
        self._ibs = ibs
        self._config = config
        self._caching = caching
        # Private attributes
        self._rowid_to_idx = None
        self._asarray = asarray
        # ut.make_index_lookup(self._rowids)

    def __vector_attributes__(self):
        # Names that behave as per-row (vectorized) columns, including
        # configurable/depcache attributes and their aliases.
        attrs = (
            self._attrs
            + self._configurable_attrs
            + self._depcache_attrs
            + list(self._attrs_aliases.keys())
        )
        return attrs

    def set_caching(self, flag):
        # Toggle attribute-value caching for subsequent lookups.
        self._caching = flag

    def __nice__(self):
        return 'num=%r' % (len(self))

    def __hash__(self):
        # Hash by content (uuid of the rowid group) rather than identity.
        return hash(self.group_uuid())

    def __add__(self, other):
        """Union of two compatible object lists (duplicate rowids removed)."""
        assert self.__class__ is other.__class__, 'incompatable'
        assert self._ibs is other._ibs, 'incompatable'
        assert self._config is other._config, 'incompatable'
        rowids = ut.unique(self._rowids + other._rowids)
        new = self.__class__(rowids, self._ibs, self._config)
        return new

    def take(self, idxs):
        """
        Creates a subset of the list using the specified indices.
        """
        rowids = ut.take(self._rowids, idxs)
        # Create a new instance pointing only to the requested subset
        newself = self.__class__(
            rowids, ibs=self._ibs, config=self._config, caching=self._caching
        )
        # Pass along any internally cached values
        _new_internal = {
            key: ut.take(val, idxs) for key, val in self._internal_attrs.items()
        }
        newself._internal_attrs = _new_internal
        return newself

    def preload(self, *attrs):
        """Fetch and cache the named attributes so later access needs no ibs."""
        assert self._ibs is not None, 'must be connected to preload'
        for attrname in attrs:
            self._internal_attrs[attrname] = getattr(self, attrname)

    def group_uuid(self):
        # Order-independent identity for this set of objects.
        sorted_uuids = sorted(self.uuids)
        group_uuid = ut.util_hash.augment_uuid(*sorted_uuids)
        return group_uuid

    def disconnect(self):
        """
        Disconnects object from the state of the database. All information has
        been assumed to be preloaded.
        """
        self._ibs = None

    def __iter__(self):
        return iter(self._rowids)

    def __len__(self):
        return len(self._rowids)

    def __getitem__(self, idx):
        # Slice -> subset list; scalar -> ObjectScalar0D proxy.
        if isinstance(idx, slice):
            idxs = list(range(*idx.indices(len(self))))
            return self.take(idxs)
        if not ut.isiterable(idx):
            obj0d_ = self.take([idx])
            obj0d = ObjectScalar0D(obj0d_)
            return obj0d
        if not isinstance(idx, slice):
            # NOTE(review): idx is iterable and cannot be a slice here
            # (slices were handled above), so this always raises and the
            # final `return self.take(idx)` below is unreachable.
            raise AssertionError('only slice supported currently')
        return self.take(idx)

    def scalars(self):
        """Return every element as an ObjectScalar0D."""
        scalar_list = [self[idx] for idx in range(len(self))]
        return scalar_list

    def compress(self, flags):
        """Subset by a boolean mask aligned with the rowids."""
        idxs = ut.where(flags)
        return self.take(idxs)

    def take_column(self, keys):
        """Return a list of dicts, one per row, with the requested attributes."""
        vals_list = zip(*[getattr(self, key) for key in keys])
        dict_list = [dict(zip(keys, vals)) for vals in vals_list]
        return dict_list

    def chunks(self, chunksize):
        # NOTE(review): `chunksize` is not forwarded to ut.ichunks here;
        # verify the intended ut.ichunks signature.
        for idxs in ut.ichunks(self, range(len(self))):
            yield self.take(idxs)

    def group_indicies(self, labels):
        unique_labels, groupxs = ut.group_indices(labels)
        return unique_labels, groupxs

    def group_items(self, labels):
        """ group as dict """
        unique_labels, groups = self.group(labels)
        label_to_group = ut.odict(zip(unique_labels, groups))
        return label_to_group

    def group(self, labels):
        """ group as list """
        unique_labels, groupxs = self.group_indicies(labels)
        groups = [self.take(idxs) for idxs in groupxs]
        return unique_labels, groups

    def lookup_idxs(self, rowids):
        """ Lookup subset indicies by rowids """
        # Build the reverse index lazily on first use.
        if self._rowid_to_idx is None:
            self._rowid_to_idx = ut.make_index_lookup(self._rowids)
        idx_list = ut.take(self._rowid_to_idx, rowids)
        return idx_list

    def loc(self, rowids):
        """ Lookup subset by rowids """
        idxs = self.lookup_idxs(rowids)
        return self.take(idxs)

    # def filter(self, filterkw):
    #     pass

    # def filter_flags(self, filterkw):
    #     pass

    def view(self, rowids=None):
        """
        Like take, but returns a view proxy that maps to the original parent
        """
        if rowids is None:
            rowids = self._rowids
        # unique_parent = self.take(unique_idxs)
        view = ObjectView1D(rowids, obj1d=self)
        return view
class ObjectView1D(ut.NiceRepr):
    # ut.HashComparable2):
    """
    Allows for proxy caching.
    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia._wbia_object import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> aids = ibs.get_valid_aids()
        >>> a = self = annots = ibs.annots(aids)
        >>> rowids = [1, 1, 3, 2, 1, 2]
        >>> self = v = a.view(rowids)
        >>> assert np.all(v.vecs[0] == v.vecs[1])
        >>> assert v.vecs[0] is v.vecs[1]
        >>> assert v.vecs[0] is not v.vecs[2]
    """

    def __init__(self, rowids, obj1d, cache=None):
        """
        Args:
            rowids: possibly-duplicated rowids this view maps onto
            obj1d: parent ObjectList1D that owns the per-rowid getters
            cache: optional shared per-attribute {rowid: value} cache;
                child views pass their own cache so fetched values are
                shared across the whole view family
        """
        self._rowids = list(rowids)
        self._obj1d = obj1d
        # Unique rowids are fetched once; _unique_inverse maps each entry of
        # _rowids back to its position among the unique rowids.
        # NOTE(review): _unique_rowids is a set, so iteration order is
        # implementation-dependent; it is used consistently within this
        # instance but verify ut.list_alignment's expectations.
        self._unique_rowids = set(self._rowids)
        self._unique_inverse = ut.list_alignment(self._unique_rowids, self._rowids)
        if cache is None:
            self._cache = ut.ddict(dict)
        else:
            self._cache = cache
        # Views always cache data for now
        self._caching = True

    def __dir__(self):
        attrs = dir(object)
        attrs += self.__dict__.keys()
        attrs += ['__dict__', '__module__', '__weakref__']
        # ['_unique_parent', '_caching', '_attr_rowid_value', '_rowids']
        attrs += list(self.__class__.__dict__.keys())
        attrs += self._obj1d.__vector_attributes__()
        return attrs

    def __vector_attributes__(self):
        return self._obj1d.__vector_attributes__()

    def __getattr__(self, key):
        """
        key = 'vecs'
        """
        # Resolve through the parent's per-rowid getter, e.g. _rowid_get_vecs.
        try:
            _rowid_getter = getattr(self._obj1d, '_rowid_get_%s' % (key,))
        except AttributeError:
            raise AttributeError('ObjectView1D has no attribute %r' % (key,))
        if self._caching:
            # Fetch only rowids missing from the shared cache, then read
            # everything back from the cache so duplicates share objects.
            rowid_to_value = self._cache[key]
            miss_rowids = [
                rowid for rowid in self._unique_rowids if rowid not in rowid_to_value
            ]
            miss_data = _rowid_getter(miss_rowids)
            for rowid, value in zip(miss_rowids, miss_data):
                rowid_to_value[rowid] = value
            unique_data = ut.take(rowid_to_value, self._unique_rowids)
        else:
            unique_data = _rowid_getter(self._unique_rowids)
        # Expand the unique values back out to the duplicated rowid order.
        data = ut.take(unique_data, self._unique_inverse)
        return data

    def __iter__(self):
        return iter(self._rowids)

    def __len__(self):
        return len(self._rowids)

    def __nice__(self):
        return 'unique=%r, num=%r' % (len(self._unique_rowids), len(self))

    # def __hash__(self):
    #     return hash(self.group_uuid())

    def view(self, rowids):
        """
        returns a view of a view that uses the same per-item cache
        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia._wbia_object import *  # NOQA
            >>> import wbia
            >>> ibs = wbia.opendb(defaultdb='testdb1')
            >>> aids = ibs.get_valid_aids()
            >>> annots = ibs.annots(aids)
            >>> self = annots.view(annots._rowids)
            >>> v1 = self.view([1, 1, 2, 3, 1, 2])
            >>> v2 = self.view([3, 4, 5])
            >>> v3 = self.view([1, 4])
            >>> v4 = self.view(3)
            >>> lazy4 = v4._make_lazy_dict()
            >>> assert v1.vecs[0] is v3.vecs[0]
            >>> assert v2._cache is self._cache
            >>> assert v2._cache is v1._cache
        """
        # A scalar rowid yields a 0D proxy; either way the child shares
        # this view's cache so values stay deduplicated across views.
        if ut.isiterable(rowids):
            childview = self.__class__(rowids, obj1d=self._obj1d, cache=self._cache)
        else:
            childview = self.__class__([rowids], obj1d=self._obj1d, cache=self._cache)
            childview = ObjectScalar0D(childview)
        return childview
| [
"logging.getLogger",
"utool.unindent",
"utool.list_alignment",
"utool.isiterable",
"utool.make_index_lookup",
"utool.invert_dict",
"utool.take",
"numpy.array",
"utool.get_funcname",
"utool.repr2",
"utool.search_module",
"utool.set_funcname",
"utool.get_func_sourcecode",
"utool.partial",
... | [((110, 148), 'utool.inject2', 'ut.inject2', (['__name__', '"""[_wbia_object]"""'], {}), "(__name__, '[_wbia_object]')\n", (120, 148), True, 'import utool as ut\n'), ((158, 183), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (175, 183), False, 'import logging\n'), ((1113, 1149), 'utool.search_module', 'ut.search_module', (['ibs', 'getter_prefix'], {}), '(ibs, getter_prefix)\n', (1129, 1149), True, 'import utool as ut\n'), ((1461, 1497), 'utool.search_module', 'ut.search_module', (['ibs', 'setter_prefix'], {}), '(ibs, setter_prefix)\n', (1477, 1497), True, 'import utool as ut\n'), ((2784, 2842), 'utool.invert_dict', 'ut.invert_dict', (['metaself._attrs_aliases'], {'unique_vals': '(False)'}), '(metaself._attrs_aliases, unique_vals=False)\n', (2798, 2842), True, 'import utool as ut\n'), ((8290, 8322), 'utool.get_argflag', 'ut.get_argflag', (['"""--autogen-core"""'], {}), "('--autogen-core')\n", (8304, 8322), True, 'import utool as ut\n'), ((1176, 1204), 'utool.named_field', 'ut.named_field', (['"""attr"""', '""".*"""'], {}), "('attr', '.*')\n", (1190, 1204), True, 'import utool as ut\n'), ((1524, 1552), 'utool.named_field', 'ut.named_field', (['"""attr"""', '""".*"""'], {}), "('attr', '.*')\n", (1538, 1552), True, 'import utool as ut\n'), ((3544, 3588), 'utool.set_funcname', 'ut.set_funcname', (['_setter', "('_set_' + attrname)"], {}), "(_setter, '_set_' + attrname)\n", (3559, 3588), True, 'import utool as ut\n'), ((4039, 4083), 'utool.set_funcname', 'ut.set_funcname', (['_getter', "('_get_' + attrname)"], {}), "(_getter, '_get_' + attrname)\n", (4054, 4083), True, 'import utool as ut\n'), ((4495, 4551), 'utool.set_funcname', 'ut.set_funcname', (['_rowid_setter', "('_rowid_set_' + attrname)"], {}), "(_rowid_setter, '_rowid_set_' + attrname)\n", (4510, 4551), True, 'import utool as ut\n'), ((5008, 5064), 'utool.set_funcname', 'ut.set_funcname', (['_rowid_getter', "('_rowid_get_' + attrname)"], {}), "(_rowid_getter, '_rowid_get_' + 
attrname)\n", (5023, 5064), True, 'import utool as ut\n'), ((5537, 5593), 'utool.set_funcname', 'ut.set_funcname', (['_rowid_getter', "('_rowid_get_' + attrname)"], {}), "(_rowid_getter, '_rowid_get_' + attrname)\n", (5552, 5593), True, 'import utool as ut\n'), ((6014, 6070), 'utool.set_funcname', 'ut.set_funcname', (['_rowid_getter', "('_rowid_get_' + attrname)"], {}), "(_rowid_getter, '_rowid_get_' + attrname)\n", (6029, 6070), True, 'import utool as ut\n'), ((7571, 7592), 'utool.get_funcname', 'ut.get_funcname', (['func'], {}), '(func)\n', (7586, 7592), True, 'import utool as ut\n'), ((7883, 7904), 'utool.get_funcname', 'ut.get_funcname', (['func'], {}), '(func)\n', (7898, 7904), True, 'import utool as ut\n'), ((7992, 8013), 'utool.get_funcname', 'ut.get_funcname', (['func'], {}), '(func)\n', (8007, 8013), True, 'import utool as ut\n'), ((12035, 12048), 'utool.LazyDict', 'ut.LazyDict', ([], {}), '()\n', (12046, 12048), True, 'import utool as ut\n'), ((13536, 13575), 'utool.unique', 'ut.unique', (['(self._rowids + other._rowids)'], {}), '(self._rowids + other._rowids)\n', (13545, 13575), True, 'import utool as ut\n'), ((13791, 13818), 'utool.take', 'ut.take', (['self._rowids', 'idxs'], {}), '(self._rowids, idxs)\n', (13798, 13818), True, 'import utool as ut\n'), ((14540, 14580), 'utool.util_hash.augment_uuid', 'ut.util_hash.augment_uuid', (['*sorted_uuids'], {}), '(*sorted_uuids)\n', (14565, 14580), True, 'import utool as ut\n'), ((15512, 15527), 'utool.where', 'ut.where', (['flags'], {}), '(flags)\n', (15520, 15527), True, 'import utool as ut\n'), ((15943, 15967), 'utool.group_indices', 'ut.group_indices', (['labels'], {}), '(labels)\n', (15959, 15967), True, 'import utool as ut\n'), ((16639, 16674), 'utool.take', 'ut.take', (['self._rowid_to_idx', 'rowids'], {}), '(self._rowid_to_idx, rowids)\n', (16646, 16674), True, 'import utool as ut\n'), ((18035, 18087), 'utool.list_alignment', 'ut.list_alignment', (['self._unique_rowids', 'self._rowids'], {}), 
'(self._unique_rowids, self._rowids)\n', (18052, 18087), True, 'import utool as ut\n'), ((19505, 19547), 'utool.take', 'ut.take', (['unique_data', 'self._unique_inverse'], {}), '(unique_data, self._unique_inverse)\n', (19512, 19547), True, 'import utool as ut\n'), ((20663, 20684), 'utool.isiterable', 'ut.isiterable', (['rowids'], {}), '(rowids)\n', (20676, 20684), True, 'import utool as ut\n'), ((8447, 8475), 'utool.get_func_sourcecode', 'ut.get_func_sourcecode', (['func'], {}), '(func)\n', (8469, 8475), True, 'import utool as ut\n'), ((8656, 8675), 'utool.unindent', 'ut.unindent', (['source'], {}), '(source)\n', (8667, 8675), True, 'import utool as ut\n'), ((8830, 8890), 're.sub', 're.sub', (['"""def .*\\\\(self"""', "('def ' + funcname + '(self')", 'source'], {}), "('def .*\\\\(self', 'def ' + funcname + '(self', source)\n", (8836, 8890), False, 'import re\n'), ((12134, 12165), 'utool.partial', 'ut.partial', (['getattr', 'self', 'attr'], {}), '(getattr, self, attr)\n', (12144, 12165), True, 'import utool as ut\n'), ((14104, 14122), 'utool.take', 'ut.take', (['val', 'idxs'], {}), '(val, idxs)\n', (14111, 14122), True, 'import utool as ut\n'), ((15090, 15108), 'utool.isiterable', 'ut.isiterable', (['idx'], {}), '(idx)\n', (15103, 15108), True, 'import utool as ut\n'), ((16585, 16619), 'utool.make_index_lookup', 'ut.make_index_lookup', (['self._rowids'], {}), '(self._rowids)\n', (16605, 16619), True, 'import utool as ut\n'), ((18140, 18154), 'utool.ddict', 'ut.ddict', (['dict'], {}), '(dict)\n', (18148, 18154), True, 'import utool as ut\n'), ((19370, 19414), 'utool.take', 'ut.take', (['rowid_to_value', 'self._unique_rowids'], {}), '(rowid_to_value, self._unique_rowids)\n', (19377, 19414), True, 'import utool as ut\n'), ((4960, 4974), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4968, 4974), True, 'import numpy as np\n'), ((5489, 5503), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5497, 5503), True, 'import numpy as np\n'), ((5966, 5980), 
'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5974, 5980), True, 'import numpy as np\n'), ((9474, 9500), 'utool.get_funcname', 'ut.get_funcname', (['prop.fget'], {}), '(prop.fget)\n', (9489, 9500), True, 'import utool as ut\n'), ((9558, 9584), 'utool.get_funcname', 'ut.get_funcname', (['prop.fset'], {}), '(prop.fset)\n', (9573, 9584), True, 'import utool as ut\n'), ((10419, 10436), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (10426, 10436), False, 'from os.path import dirname, join\n'), ((1334, 1351), 're.match', 're.match', (['pat', 'fn'], {}), '(pat, fn)\n', (1342, 1351), False, 'import re\n'), ((1682, 1699), 're.match', 're.match', (['pat', 'fn'], {}), '(pat, fn)\n', (1690, 1699), False, 'import re\n'), ((8788, 8799), 'utool.repr2', 'ut.repr2', (['v'], {}), '(v)\n', (8796, 8799), True, 'import utool as ut\n')] |
import torch.nn as nn
import os
import pandas as pd
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import torch
import torch.nn.functional as nnf
import SimpleITK as sitk
import json
import random
import time
import medpy.metric.binary as mmb
from scipy import ndimage
from batchgenerators.utilities.file_and_folder_operations import load_pickle, save_pickle
from nnunet.utilities.nd_softmax import softmax_helper
from PairwiseMeasures_modified import PairwiseMeasures
import medpy.io as mio
from MyDataloader import get_train_cases, get_cmbdataloader
from MyNetwork import UNet3Stage
from MyLoss import FocalLoss, SoftDiceLoss, DC_and_Focal_loss
from ScreenTrainer import ScreenTrainer
from DiscriTrainer import DiscriTrainer
class UNetTrainer(nn.Module):
    """Trainer for the segmentation U-Net of a three-stage cerebral
    microbleed (CMB) pipeline.

    Stage 1 (``ScreenTrainer``) proposes candidate locations, stage 2
    (``DiscriTrainer``) rejects false positives, and this class trains and
    evaluates a patch-based segmentation network (``UNet3Stage``) on the
    surviving candidates. Both upstream models are loaded in test mode.
    """

    def __init__(self, data_path, model_save_path, dataset_path, screen_model_path, discri_model_path,
                 load_screen='current', load_discri='current', device='cuda',
                 all_epoch=50, fold=0, bbox=(32, 32, 24), batch_size=32, loss='soft dice',
                 optimizer='sgd', init_lr=1e-3, decay_exponent=0.9, config=None, if_test=False,
                 random_negatives=200000, aug_num=30, add_fp=False,
                 resample_num=(100000, 100000, 100000), modality=('T1', 'T2', 'T2S')):
        """
        Args:
            data_path: root folder with per-patient image arrays (.npy)
            model_save_path: where segmentation checkpoints are written
            dataset_path: folder with fold-division and sample-index json files
            screen_model_path / discri_model_path: checkpoint folders for the
                two upstream models
            load_screen / load_discri: checkpoint name prefix to load
            device: torch device string
            all_epoch (int): total epochs, used by the poly lr schedule
            fold (int): cross-validation fold index
            bbox: patch size (x, y, z) cropped around each candidate
            batch_size (int): training batch size
            loss: 'soft dice' or 'dice focal'
            optimizer: 'sgd' or 'adam'
            init_lr / decay_exponent: poly learning-rate schedule parameters
            config: free-form config stored in checkpoints
            if_test (bool): if True, skip building the training sample lists
            random_negatives (int): number of random negatives to generate
            aug_num (int): translation augmentations per positive sample
            add_fp (bool): if True, also train on screening false positives
            resample_num: per-epoch sample counts (pos, neg, fp)
            modality: MRI modalities stacked as input channels
        """
        super(UNetTrainer, self).__init__()
        self.bbox = bbox
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.decay_exponent = decay_exponent
        self.all_epoch = all_epoch
        self.config = config
        self.resample_num = resample_num
        self.modality = modality
        self.aug_num = aug_num
        self.fold = fold
        self.random_negatives = random_negatives
        # Upstream candidate-proposal model, frozen (if_test=True).
        self.screen_trainer = ScreenTrainer(
            data_path=data_path,
            model_save_path=screen_model_path,
            dataset_path=dataset_path,
            device=device,
            fold=fold,
            modality=modality,
            if_test=True)
        self.screen_trainer.load_model(load_screen)
        # Upstream false-positive-reduction model, frozen (if_test=True).
        self.discri_trainer = DiscriTrainer(
            data_path=data_path,
            screen_model_path=screen_model_path,
            load_screen=None,
            model_save_path=discri_model_path,
            dataset_path=dataset_path,
            device=device,
            fold=fold,
            modality=modality,
            if_test=True)
        self.discri_trainer.load_model(load_discri)
        # path define
        self.data_path = data_path
        self.dataset_path = dataset_path
        self.model_save_path = model_save_path + 'fold_%d/' % fold
        if not os.path.exists(self.model_save_path):
            os.makedirs(self.model_save_path)
        # device
        self.device = device
        # load division of data
        if os.path.exists(dataset_path + 'fold_division.json'):
            with open(dataset_path + 'fold_division.json', mode='r') as f:
                splits = json.load(f)
            self.train_list_sub = splits[str(fold)]['train']
            self.val_list_sub = splits[str(fold)]['val']
        else:
            self.train_list_sub = []
            self.val_list_sub = []
            print('Data division is empty!')
        # training and validation samples
        if not if_test:
            # Dataset folder name encodes the screening bbox and sampling setup.
            self.dataset_name = 'fold_%d/bbox-%d-%d-%d_neg-%d_aug-%d/' % \
                                (fold, self.screen_trainer.bbox[0], self.screen_trainer.bbox[1],
                                 self.screen_trainer.bbox[2], random_negatives, aug_num)
            if not os.path.exists(dataset_path + self.dataset_name):
                os.makedirs(dataset_path + self.dataset_name)
            # load or generate the training samples
            if os.path.exists(dataset_path + self.dataset_name + 'pos.json'):
                with open(dataset_path + self.dataset_name + 'pos.json', mode='r') as f:
                    self.train_cases_pos = json.load(f)
                if os.path.exists(dataset_path + self.dataset_name + 'neg.json'):
                    with open(dataset_path + self.dataset_name + 'neg.json', mode='r') as f:
                        self.train_cases_neg = json.load(f)
            else:
                # First run for this configuration: generate and persist.
                self.train_cases_pos, self.train_cases_neg = get_train_cases(
                    data_path=self.data_path, train_list=self.train_list_sub, bbox=self.bbox, seed=2021,
                    if_translation=True, random_negatives=random_negatives, aug_num=aug_num)
                with open(dataset_path + self.dataset_name + 'pos.json', mode='w') as f:
                    json.dump(self.train_cases_pos, f)
                with open(dataset_path + self.dataset_name + 'neg.json', mode='w') as f:
                    json.dump(self.train_cases_neg, f)
            # load false positive samples
            self.train_cases_fp = []
            if add_fp:
                if os.path.exists(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.screen_trainer.model_name)):
                    with open(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.screen_trainer.model_name), mode='r') as f:
                        self.train_cases_fp = json.load(f)
            print('Dataset: pos %d, neg %d, fp %d' %
                  (len(self.train_cases_pos), len(self.train_cases_neg), len(self.train_cases_fp)))
        else:
            self.train_cases_fp = []
            self.train_cases_pos = []
            self.train_cases_neg = []
        # model
        self.model = UNet3Stage(in_channel=len(modality), num_class=2)
        self.model.to(self.device)
        # loss function
        if loss == 'soft dice':
            self.loss_seg = SoftDiceLoss(
                **{'apply_nonlin': None, 'batch_dice': True, 'smooth': 1e-5, 'do_bg': True})
        elif loss == 'dice focal':
            self.loss_seg = DC_and_Focal_loss(
                {'batch_dice': True, 'smooth': 1e-5, 'do_bg': False},
                {'alpha': 0.5, 'gamma': 2, 'smooth': 1e-5})
        else:
            raise ValueError('No such seg loss')
        # optimizer
        if optimizer == 'sgd':
            self.optimizer = optim.SGD(self.model.parameters(), lr=init_lr, momentum=0.99, nesterov=True)
        elif optimizer == 'adam':
            self.optimizer = optim.Adam(self.model.parameters(), lr=init_lr)
        else:
            raise ValueError('No such optimizer')
        self.epoch = 1
        self.lr = init_lr
        # Running metrics: train = [loss, dice]; test = [TP, FN, avg FP,
        # count diff, F1, Dice, volume diff] (see val_epoch).
        self.train_metric = [0] * 2
        self.test_metric = [0] * 7

    def train_epoch(self):
        """Run one training epoch over a resampled pos/neg/fp mix.

        Returns:
            list: ``self.train_metric`` — [running mean loss, running dice].
        """
        self.model.train()
        train_accum = [0] * 4
        train_cases_fp = self.train_cases_fp.copy()
        train_cases_pos = self.train_cases_pos.copy()
        train_cases_neg = self.train_cases_neg.copy()
        # randomly choose training samples, ensuring that the number of samples is fixed under different conditions
        if len(self.resample_num):
            train_cases_pos = np.random.choice(train_cases_pos, size=self.resample_num[0]).tolist()
            train_cases_neg = np.random.choice(train_cases_neg, size=self.resample_num[1]).tolist()
            if len(train_cases_fp):
                train_cases_fp = np.random.choice(train_cases_fp, size=self.resample_num[2]).tolist()
        data_list = train_cases_pos + train_cases_neg + train_cases_fp
        dataloader = get_cmbdataloader(
            data_path=self.data_path,
            dataset_index=data_list,
            bbox=self.bbox,
            batch_size=self.batch_size,
            shuffle=True,
            pin_memory=True,
            num_workers=2,
            modality=self.modality,
            if_seg=True
        )
        dataloader = tqdm(dataloader)
        for img_batch, label_batch, mask_batch in dataloader:
            img_batch = img_batch.to(self.device).float()
            mask_batch = mask_batch.to(self.device)
            seg_pred_batch = self.model(img_batch)
            loss = self.loss_seg(seg_pred_batch, mask_batch)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_accum[0] += img_batch.shape[0]
            # have_cmb: whether this batch contains any positive label at all.
            have_cmb = 1 if torch.sum(label_batch) else 0
            train_accum[1] += have_cmb
            loss_value = loss.detach().cpu().numpy()
            # NOTE(review): `+ 1` presumably shifts a soft-dice loss in
            # [-1, 0] into [0, 1]; dice is then tracked as the negated loss
            # on CMB-containing batches only — confirm against the loss defs.
            train_accum[2] += loss_value + 1
            train_accum[3] += - loss_value * have_cmb
            self.train_metric[0] = train_accum[2] / train_accum[0]  # loss
            self.train_metric[1] = train_accum[3] / train_accum[1]  # dice
            dataloader.set_description('Epoch: %d, ' % self.epoch + 'train loss %.4f, ' % self.train_metric[0] +
                                       'train dice %.4f, ' % self.train_metric[1])
        return self.train_metric

    def val_epoch(self):
        """Run the full three-stage pipeline on the validation subjects.

        Per patient: screen candidates, reduce false positives, segment, then
        score against the reference CMB mask with PairwiseMeasures.

        Returns:
            list: ``self.test_metric`` — [TP, FN, avg FP/case, avg count
            diff, mean F1 (CMB cases), mean Dice (CMB cases), avg vol diff].
        """
        self.screen_trainer.model.eval()
        self.discri_trainer.model.eval()
        self.model.eval()
        test_accum = [0] * 9
        for pat in self.val_list_sub:
            data_list = []
            for mod in self.modality:
                data_list.append(np.load(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod)))
            cmb, h = mio.load(self.data_path + '%s/%s_space-T2S_CMB.nii.gz' % (pat, pat))
            img = np.stack(data_list, axis=0)
            # Stage 1: screening proposals (with non-maximum suppression).
            pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space = \
                self.screen_trainer.inference(img, patch_size=(160, 160, 80), thresh=0.1, size=2, if_nms=True)
            # Stage 2: false-positive reduction.
            pred_fp_reduced, reduc_candidates_list, num = self.discri_trainer.inference(img, candidates_list, size=2, thresh=0.5)
            # Stage 3: patch-wise segmentation around surviving candidates.
            seg_pred = self.inference(img, reduc_candidates_list)
            pe_seg = PairwiseMeasures(ref_img=cmb, seg_img=seg_pred, analysis='microbleeds',
                                     measures=('f1_score', 'tp', 'fn', 'fp', 'mean_diceover',
                                               'absolute_count_difference', 'absolute_volume_difference'),
                                     connectivity=3, pixdim=h.get_voxel_spacing(), empty=True,
                                     threshold=0.5, thresh_assign=1)
            tp_seg, fn_seg, fp_seg, f1_seg = pe_seg.m_dict['tp'][0](), pe_seg.m_dict['fn'][0](), \
                pe_seg.m_dict['fp'][0](), pe_seg.m_dict['f1_score'][0]()
            dice = pe_seg.m_dict['mean_diceover'][0]()
            vol_diff = pe_seg.m_dict['absolute_volume_difference'][0]()
            count_diff = pe_seg.m_dict['absolute_count_difference'][0]()
            test_accum[0] += 1  # number of cases
            test_accum[1] += 1 if np.sum(cmb) else 0  # number of cases with CMB
            test_accum[2] += tp_seg
            test_accum[3] += fn_seg
            test_accum[4] += fp_seg
            test_accum[5] += count_diff
            # F1/Dice are only meaningful on cases that actually contain CMBs.
            test_accum[6] += f1_seg if np.sum(cmb) else 0
            test_accum[7] += dice if np.sum(cmb) else 0
            test_accum[8] += vol_diff
            print('%s: TP %d, FN %d, FP %d, count diff %.2f, F1 %.2f, Dice %.2f, volume diff %.2f' %
                  (pat, tp_seg, fn_seg, fp_seg, count_diff, f1_seg, dice, vol_diff))
        self.test_metric[0] = test_accum[2]
        self.test_metric[1] = test_accum[3]
        self.test_metric[2] = test_accum[4] / test_accum[0]
        self.test_metric[3] = test_accum[5] / test_accum[0]
        self.test_metric[4] = test_accum[6] / test_accum[1]
        self.test_metric[5] = test_accum[7] / test_accum[1]
        self.test_metric[6] = test_accum[8] / test_accum[0]
        print('Epoch: %d, TP %d, FN %d, avg FP %.2f, count diff %.2f, F1 %.2f, Dice %.2f, volume diff %.2f' %
              (self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2],
               self.test_metric[3], self.test_metric[4], self.test_metric[5], self.test_metric[6]))
        return self.test_metric

    def adjust_lr(self):
        """Adjust the learning rate following the 'poly' policy."""
        self.lr = self.init_lr * (1 - self.epoch / self.all_epoch) ** self.decay_exponent
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.lr
        return self.lr

    def save_model(self, force=False):
        """Save the model every epoch(current) and every 5 epochs(epoch_xx)"""
        state = {
            'epoch': self.epoch,
            'state_dict': self.model.state_dict(),
            'config': self.config,
        }
        torch.save(state, self.model_save_path + 'current.pth.tar')
        # Periodic checkpoints embed the validation metrics in the filename.
        if self.epoch % 5 == 0 or force:
            torch.save(state, self.model_save_path + 'epoch_%d_%.2f_%.2f_%.2f_%.2f.pth.tar' %
                       (self.epoch, self.test_metric[3], self.test_metric[4], self.test_metric[5], self.test_metric[6]))

    def load_model(self, model_name='current', silent=False):
        """Load the checkpoint whose filename starts with ``model_name``.

        Raises:
            ValueError: if more than one checkpoint matches the prefix.
        """
        all_saved_models = os.listdir(self.model_save_path)
        matched_model = [model for model in all_saved_models if model.startswith(model_name)]
        if len(matched_model) == 1:
            checkpoint = torch.load(self.model_save_path + matched_model[0], map_location={'cuda:0': self.device})
            self.epoch = checkpoint['epoch'] + 1
            self.model.load_state_dict(checkpoint['state_dict'])
            self.model.to(self.device)
            # self.config = checkpoint['config']
            self.adjust_lr()
        elif len(matched_model) > 1:
            raise ValueError('Too many matched models!')
        if not silent:
            print('Segmentation model: %s, device: %s, epoch: %d'
                  % (self.model_save_path + model_name, self.device, self.epoch))

    def inference(self, data: np.ndarray, candidates_list, if_translate=False):
        """Segment CMB probability around each candidate position.

        Args:
            data: stacked modalities, shape (num_modalities, X, Y, Z)
            candidates_list: candidate center coordinates (original space)
            if_translate: if True, also predict at the 27 one-voxel
                translations of each candidate and average (test-time
                augmentation)

        Returns:
            np.ndarray: voxelwise probability map with the original spatial
            shape; overlapping patch predictions are averaged.
        """
        init_shape = data.shape[1:]
        # Pad one full bbox per side so border candidates stay in bounds.
        enlarged_data = np.pad(data, ((0, 0), (self.bbox[0], self.bbox[0]), (self.bbox[1], self.bbox[1]), (self.bbox[2], self.bbox[2])),
                               mode='constant', constant_values=0)
        shape = enlarged_data.shape[1:]
        seg_pred = np.zeros(shape)
        overlap = np.zeros(shape)
        for position in candidates_list:
            position = np.array(position, dtype=int)
            if if_translate:
                x, y, z = position
                # NOTE(review): these positions are plain lists, so the
                # `pos + self.bbox` below would concatenate/fail rather than
                # add element-wise — verify the if_translate=True path.
                position_enlarged = [[i, j, k] for i in [x - 1, x, x + 1] for j in [y - 1, y, y + 1] for k in
                                     [z - 1, z, z + 1]]
            else:
                position_enlarged = [position]
            regions = np.zeros((len(position_enlarged), len(self.modality), self.bbox[0], self.bbox[1], self.bbox[2]))
            for i, pos in enumerate(position_enlarged):
                # Shift candidate coordinates into the padded volume.
                pos_new = pos + self.bbox
                neighbour = self.get_neighbour(enlarged_data, pos_new)
                regions[i] = neighbour
                # print(neighbour.shape, pos_new, shape)
            regions = torch.tensor(regions, dtype=torch.float32, device=self.device)
            # Channel 1 of the network output is the foreground map.
            out_seg = self.model(regions).detach()[:, 1]
            for i, pos in enumerate(position_enlarged):
                pos_new = pos + self.bbox
                seg_pred[pos_new[0]-self.bbox[0]//2:pos_new[0]+self.bbox[0]//2,
                         pos_new[1]-self.bbox[1]//2:pos_new[1]+self.bbox[1]//2,
                         pos_new[2]-self.bbox[2]//2:pos_new[2]+self.bbox[2]//2] += out_seg[i].cpu().numpy()
                overlap[pos_new[0]-self.bbox[0]//2:pos_new[0]+self.bbox[0]//2,
                        pos_new[1]-self.bbox[1]//2:pos_new[1]+self.bbox[1]//2,
                        pos_new[2]-self.bbox[2]//2:pos_new[2]+self.bbox[2]//2] += 1
        # Crop the padding back off to recover the original spatial extent.
        seg_pred = seg_pred[self.bbox[0]:self.bbox[0]+init_shape[0],
                            self.bbox[1]:self.bbox[1]+init_shape[1],
                            self.bbox[2]:self.bbox[2]+init_shape[2]]
        overlap = overlap[self.bbox[0]:self.bbox[0]+init_shape[0],
                          self.bbox[1]:self.bbox[1]+init_shape[1],
                          self.bbox[2]:self.bbox[2]+init_shape[2]]
        # Average overlapping predictions; clip avoids division by zero
        # where no patch covered a voxel.
        seg_pred /= np.clip(overlap, a_min=1e-5, a_max=1e10)
        return seg_pred

    def get_neighbour(self, data: np.ndarray, position):
        """Crop a bbox-sized patch of ``data`` centered at ``position``
        (all channels; position indexes the spatial axes)."""
        return data[:, position[0]-self.bbox[0]//2:position[0]+self.bbox[0]//2,
                    position[1]-self.bbox[1]//2:position[1]+self.bbox[1]//2,
                    position[2]-self.bbox[2]//2:position[2]+self.bbox[2]//2]
| [
"numpy.clip",
"medpy.io.load",
"numpy.array",
"torch.sum",
"MyLoss.DC_and_Focal_loss",
"MyDataloader.get_cmbdataloader",
"os.path.exists",
"os.listdir",
"numpy.stack",
"numpy.random.choice",
"MyLoss.SoftDiceLoss",
"DiscriTrainer.DiscriTrainer",
"torch.save",
"os.makedirs",
"MyDataloader.... | [((1807, 1970), 'ScreenTrainer.ScreenTrainer', 'ScreenTrainer', ([], {'data_path': 'data_path', 'model_save_path': 'screen_model_path', 'dataset_path': 'dataset_path', 'device': 'device', 'fold': 'fold', 'modality': 'modality', 'if_test': '(True)'}), '(data_path=data_path, model_save_path=screen_model_path,\n dataset_path=dataset_path, device=device, fold=fold, modality=modality,\n if_test=True)\n', (1820, 1970), False, 'from ScreenTrainer import ScreenTrainer\n'), ((2139, 2358), 'DiscriTrainer.DiscriTrainer', 'DiscriTrainer', ([], {'data_path': 'data_path', 'screen_model_path': 'screen_model_path', 'load_screen': 'None', 'model_save_path': 'discri_model_path', 'dataset_path': 'dataset_path', 'device': 'device', 'fold': 'fold', 'modality': 'modality', 'if_test': '(True)'}), '(data_path=data_path, screen_model_path=screen_model_path,\n load_screen=None, model_save_path=discri_model_path, dataset_path=\n dataset_path, device=device, fold=fold, modality=modality, if_test=True)\n', (2152, 2358), False, 'from DiscriTrainer import DiscriTrainer\n'), ((2890, 2941), 'os.path.exists', 'os.path.exists', (["(dataset_path + 'fold_division.json')"], {}), "(dataset_path + 'fold_division.json')\n", (2904, 2941), False, 'import os\n'), ((7504, 7708), 'MyDataloader.get_cmbdataloader', 'get_cmbdataloader', ([], {'data_path': 'self.data_path', 'dataset_index': 'data_list', 'bbox': 'self.bbox', 'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(2)', 'modality': 'self.modality', 'if_seg': '(True)'}), '(data_path=self.data_path, dataset_index=data_list, bbox=\n self.bbox, batch_size=self.batch_size, shuffle=True, pin_memory=True,\n num_workers=2, modality=self.modality, if_seg=True)\n', (7521, 7708), False, 'from MyDataloader import get_train_cases, get_cmbdataloader\n'), ((7850, 7866), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (7854, 7866), False, 'from tqdm import tqdm\n'), ((12703, 12762), 
'torch.save', 'torch.save', (['state', "(self.model_save_path + 'current.pth.tar')"], {}), "(state, self.model_save_path + 'current.pth.tar')\n", (12713, 12762), False, 'import torch\n'), ((13115, 13147), 'os.listdir', 'os.listdir', (['self.model_save_path'], {}), '(self.model_save_path)\n', (13125, 13147), False, 'import os\n'), ((14047, 14205), 'numpy.pad', 'np.pad', (['data', '((0, 0), (self.bbox[0], self.bbox[0]), (self.bbox[1], self.bbox[1]), (self.\n bbox[2], self.bbox[2]))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(data, ((0, 0), (self.bbox[0], self.bbox[0]), (self.bbox[1], self.\n bbox[1]), (self.bbox[2], self.bbox[2])), mode='constant', constant_values=0\n )\n", (14053, 14205), True, 'import numpy as np\n'), ((14289, 14304), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (14297, 14304), True, 'import numpy as np\n'), ((14324, 14339), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (14332, 14339), True, 'import numpy as np\n'), ((16322, 16372), 'numpy.clip', 'np.clip', (['overlap'], {'a_min': '(1e-05)', 'a_max': '(10000000000.0)'}), '(overlap, a_min=1e-05, a_max=10000000000.0)\n', (16329, 16372), True, 'import numpy as np\n'), ((2708, 2744), 'os.path.exists', 'os.path.exists', (['self.model_save_path'], {}), '(self.model_save_path)\n', (2722, 2744), False, 'import os\n'), ((2759, 2792), 'os.makedirs', 'os.makedirs', (['self.model_save_path'], {}), '(self.model_save_path)\n', (2770, 2792), False, 'import os\n'), ((3849, 3910), 'os.path.exists', 'os.path.exists', (["(dataset_path + self.dataset_name + 'pos.json')"], {}), "(dataset_path + self.dataset_name + 'pos.json')\n", (3863, 3910), False, 'import os\n'), ((4075, 4136), 'os.path.exists', 'os.path.exists', (["(dataset_path + self.dataset_name + 'neg.json')"], {}), "(dataset_path + self.dataset_name + 'neg.json')\n", (4089, 4136), False, 'import os\n'), ((5807, 5901), 'MyLoss.SoftDiceLoss', 'SoftDiceLoss', ([], {}), "(**{'apply_nonlin': None, 'batch_dice': True, 'smooth': 
1e-05,\n 'do_bg': True})\n", (5819, 5901), False, 'from MyLoss import FocalLoss, SoftDiceLoss, DC_and_Focal_loss\n'), ((9359, 9427), 'medpy.io.load', 'mio.load', (["(self.data_path + '%s/%s_space-T2S_CMB.nii.gz' % (pat, pat))"], {}), "(self.data_path + '%s/%s_space-T2S_CMB.nii.gz' % (pat, pat))\n", (9367, 9427), True, 'import medpy.io as mio\n'), ((9447, 9474), 'numpy.stack', 'np.stack', (['data_list'], {'axis': '(0)'}), '(data_list, axis=0)\n', (9455, 9474), True, 'import numpy as np\n'), ((12818, 13007), 'torch.save', 'torch.save', (['state', "(self.model_save_path + 'epoch_%d_%.2f_%.2f_%.2f_%.2f.pth.tar' % (self.\n epoch, self.test_metric[3], self.test_metric[4], self.test_metric[5],\n self.test_metric[6]))"], {}), "(state, self.model_save_path + \n 'epoch_%d_%.2f_%.2f_%.2f_%.2f.pth.tar' % (self.epoch, self.test_metric[\n 3], self.test_metric[4], self.test_metric[5], self.test_metric[6]))\n", (12828, 13007), False, 'import torch\n'), ((13306, 13399), 'torch.load', 'torch.load', (['(self.model_save_path + matched_model[0])'], {'map_location': "{'cuda:0': self.device}"}), "(self.model_save_path + matched_model[0], map_location={'cuda:0':\n self.device})\n", (13316, 13399), False, 'import torch\n'), ((14406, 14435), 'numpy.array', 'np.array', (['position'], {'dtype': 'int'}), '(position, dtype=int)\n', (14414, 14435), True, 'import numpy as np\n'), ((15150, 15212), 'torch.tensor', 'torch.tensor', (['regions'], {'dtype': 'torch.float32', 'device': 'self.device'}), '(regions, dtype=torch.float32, device=self.device)\n', (15162, 15212), False, 'import torch\n'), ((3045, 3057), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3054, 3057), False, 'import json\n'), ((3667, 3715), 'os.path.exists', 'os.path.exists', (['(dataset_path + self.dataset_name)'], {}), '(dataset_path + self.dataset_name)\n', (3681, 3715), False, 'import os\n'), ((3734, 3779), 'os.makedirs', 'os.makedirs', (['(dataset_path + self.dataset_name)'], {}), '(dataset_path + self.dataset_name)\n', (3745, 
3779), False, 'import os\n'), ((4366, 4548), 'MyDataloader.get_train_cases', 'get_train_cases', ([], {'data_path': 'self.data_path', 'train_list': 'self.train_list_sub', 'bbox': 'self.bbox', 'seed': '(2021)', 'if_translation': '(True)', 'random_negatives': 'random_negatives', 'aug_num': 'aug_num'}), '(data_path=self.data_path, train_list=self.train_list_sub,\n bbox=self.bbox, seed=2021, if_translation=True, random_negatives=\n random_negatives, aug_num=aug_num)\n', (4381, 4548), False, 'from MyDataloader import get_train_cases, get_cmbdataloader\n'), ((5000, 5109), 'os.path.exists', 'os.path.exists', (["(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.\n screen_trainer.model_name))"], {}), "(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold,\n self.screen_trainer.model_name))\n", (5014, 5109), False, 'import os\n'), ((5980, 6102), 'MyLoss.DC_and_Focal_loss', 'DC_and_Focal_loss', (["{'batch_dice': True, 'smooth': 1e-05, 'do_bg': False}", "{'alpha': 0.5, 'gamma': 2, 'smooth': 1e-05}"], {}), "({'batch_dice': True, 'smooth': 1e-05, 'do_bg': False}, {\n 'alpha': 0.5, 'gamma': 2, 'smooth': 1e-05})\n", (5997, 6102), False, 'from MyLoss import FocalLoss, SoftDiceLoss, DC_and_Focal_loss\n'), ((8345, 8367), 'torch.sum', 'torch.sum', (['label_batch'], {}), '(label_batch)\n', (8354, 8367), False, 'import torch\n'), ((10836, 10847), 'numpy.sum', 'np.sum', (['cmb'], {}), '(cmb)\n', (10842, 10847), True, 'import numpy as np\n'), ((11075, 11086), 'numpy.sum', 'np.sum', (['cmb'], {}), '(cmb)\n', (11081, 11086), True, 'import numpy as np\n'), ((11132, 11143), 'numpy.sum', 'np.sum', (['cmb'], {}), '(cmb)\n', (11138, 11143), True, 'import numpy as np\n'), ((4046, 4058), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4055, 4058), False, 'import json\n'), ((4272, 4284), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4281, 4284), False, 'import json\n'), ((4694, 4728), 'json.dump', 'json.dump', (['self.train_cases_pos', 'f'], {}), '(self.train_cases_pos, f)\n', 
(4703, 4728), False, 'import json\n'), ((4840, 4874), 'json.dump', 'json.dump', (['self.train_cases_neg', 'f'], {}), '(self.train_cases_neg, f)\n', (4849, 4874), False, 'import json\n'), ((7099, 7159), 'numpy.random.choice', 'np.random.choice', (['train_cases_pos'], {'size': 'self.resample_num[0]'}), '(train_cases_pos, size=self.resample_num[0])\n', (7115, 7159), True, 'import numpy as np\n'), ((7200, 7260), 'numpy.random.choice', 'np.random.choice', (['train_cases_neg'], {'size': 'self.resample_num[1]'}), '(train_cases_neg, size=self.resample_num[1])\n', (7216, 7260), True, 'import numpy as np\n'), ((9267, 9335), 'numpy.load', 'np.load', (["(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod))"], {}), "(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod))\n", (9274, 9335), True, 'import numpy as np\n'), ((5292, 5304), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5301, 5304), False, 'import json\n'), ((7341, 7400), 'numpy.random.choice', 'np.random.choice', (['train_cases_fp'], {'size': 'self.resample_num[2]'}), '(train_cases_fp, size=self.resample_num[2])\n', (7357, 7400), True, 'import numpy as np\n')] |
import numpy as np
import rclpy
from rclpy.node import Node
import tf_transformations
from geometry_msgs.msg import Twist, Quaternion
from nav_msgs.msg import Odometry
class OdometryPublisher(Node):
    """ROS 2 node that dead-reckons a planar pose from commanded velocities.

    Subscribes to ``/ninjasrobot/velocity`` (Twist), integrates the linear and
    angular speed over time, and publishes the resulting pose and the latest
    velocities on ``/odom`` (Odometry) every 0.2 s.
    """

    def __init__(self):
        super().__init__('odom_publisher')

        # subscriber for the robot's velocity feed
        self.vel_sub = self.create_subscription(
            Twist,
            '/ninjasrobot/velocity',
            self.vel_sub_cb,
            1
        )
        self.vel_sub  # prevent unused variable warning

        # odometry publisher, driven by a 5 Hz timer
        self.odom_pub = self.create_publisher(Odometry, '/odom', 10)
        self.odom_pub_timer = self.create_timer(0.2, self.odom_pub_timer_cb)

        # integrated planar pose (x, y, heading) and last received velocities
        self.x = 0.
        self.y = 0.
        self.th = 0.
        self.lin_x = 0.
        self.ang_z = 0.
        self.cur_time = self.get_clock().now()
        self.pre_time = self.get_clock().now()

    def vel_sub_cb(self, msg):
        """Cache the most recent linear and angular velocity commands."""
        self.lin_x = msg.linear.x
        self.ang_z = msg.angular.z

    def odom_pub_timer_cb(self):
        """Advance the pose estimate by one time step and publish Odometry."""
        self.cur_time = self.get_clock().now()
        elapsed = (self.cur_time - self.pre_time).nanoseconds * 1e-9  # seconds

        # dead-reckoning: project the linear speed onto the previous heading,
        # then advance the heading itself
        self.x += self.lin_x * np.cos(self.th) * elapsed
        self.y += self.lin_x * np.sin(self.th) * elapsed
        self.th += self.ang_z * elapsed
        q = tf_transformations.quaternion_about_axis(self.th, (0, 0, 1))

        # assemble the Odometry message (pose in "odom", twist in "base_link")
        msg = Odometry()
        msg.header.stamp = self.cur_time.to_msg()
        msg.header.frame_id = "odom"
        msg.child_frame_id = "base_link"
        msg.pose.pose.position.x = self.x
        msg.pose.pose.position.y = self.y
        # tf_transformations quaternions are ordered (x, y, z, w)
        msg.pose.pose.orientation.x = q[0]
        msg.pose.pose.orientation.y = q[1]
        msg.pose.pose.orientation.z = q[2]
        msg.pose.pose.orientation.w = q[3]
        msg.twist.twist.linear.x = self.lin_x
        msg.twist.twist.angular.z = self.ang_z

        self.odom_pub.publish(msg)
        self.get_logger().debug(f'Publishing: {msg}')
        self.pre_time = self.cur_time
def main(args=None):
    """Initialise rclpy, spin the odometry node, and clean up on shutdown."""
    rclpy.init(args=args)
    node = OdometryPublisher()
    rclpy.spin(node)
    # Destroying the node explicitly is optional; the garbage collector
    # would otherwise do it when the node object is reclaimed.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| [
"tf_transformations.quaternion_about_axis",
"nav_msgs.msg.Odometry",
"rclpy.spin",
"numpy.cos",
"numpy.sin",
"rclpy.init",
"rclpy.shutdown"
] | [((2270, 2291), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (2280, 2291), False, 'import rclpy\n'), ((2339, 2365), 'rclpy.spin', 'rclpy.spin', (['odom_publisher'], {}), '(odom_publisher)\n', (2349, 2365), False, 'import rclpy\n'), ((2556, 2572), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2570, 2572), False, 'import rclpy\n'), ((1456, 1516), 'tf_transformations.quaternion_about_axis', 'tf_transformations.quaternion_about_axis', (['self.th', '(0, 0, 1)'], {}), '(self.th, (0, 0, 1))\n', (1496, 1516), False, 'import tf_transformations\n'), ((1606, 1616), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (1614, 1616), False, 'from nav_msgs.msg import Odometry\n'), ((1256, 1271), 'numpy.cos', 'np.cos', (['self.th'], {}), '(self.th)\n', (1262, 1271), True, 'import numpy as np\n'), ((1308, 1323), 'numpy.sin', 'np.sin', (['self.th'], {}), '(self.th)\n', (1314, 1323), True, 'import numpy as np\n')] |
"""
This file regroups several custom keras layers used in the generation model:
- RandomSpatialDeformation,
- RandomCrop,
- RandomFlip,
- SampleConditionalGMM,
- SampleResolution,
- GaussianBlur,
- DynamicGaussianBlur,
- MimicAcquisition,
- BiasFieldCorruption,
- IntensityAugmentation,
- DiceLoss,
- WeightedL2Loss,
- ResetValuesToZero,
- ConvertLabels,
- PadAroundCentre,
- MaskEdges
If you use this code, please cite the first SynthSeg paper:
https://github.com/BBillot/lab2im/blob/master/bibtex.bib
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""
# python imports
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.layers import Layer
# project imports
from . import utils
from . import edit_tensors as l2i_et
# third-party imports
from ext.neuron import utils as nrn_utils
import ext.neuron.layers as nrn_layers
class RandomSpatialDeformation(Layer):
    """This layer spatially deforms one or several tensors with a combination of affine and elastic transformations.
    The input tensors are expected to have the same shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    The non linear deformation is obtained by:
    1) a small-size SVF is sampled from a centred normal distribution of random standard deviation.
    2) it is resized with trilinear interpolation to half the shape of the input tensor
    3) it is integrated to obtain a diffeomorphic transformation
    4) finally, it is resized (again with trilinear interpolation) to full image size
    :param scaling_bounds: (optional) range of the random scaling to apply. The scaling factor for each dimension is
    sampled from a uniform distribution of predefined bounds. Can either be:
    1) a number, in which case the scaling factor is independently sampled from the uniform distribution of bounds
    [1-scaling_bounds, 1+scaling_bounds] for each dimension.
    2) a sequence, in which case the scaling factor is sampled from the uniform distribution of bounds
    (1-scaling_bounds[i], 1+scaling_bounds[i]) for the i-th dimension.
    3) a numpy array of shape (2, n_dims), in which case the scaling factor is sampled from the uniform distribution
    of bounds (scaling_bounds[0, i], scaling_bounds[1, i]) for the i-th dimension.
    4) False, in which case scaling is completely turned off.
    Default is scaling_bounds = 0.15 (case 1)
    :param rotation_bounds: (optional) same as scaling bounds but for the rotation angle, except that for cases 1
    and 2, the bounds are centred on 0 rather than 1, i.e. [0+rotation_bounds[i], 0-rotation_bounds[i]].
    Default is rotation_bounds = 15.
    :param shearing_bounds: (optional) same as scaling bounds. Default is shearing_bounds = 0.012.
    :param translation_bounds: (optional) same as scaling bounds. Default is translation_bounds = False, but we
    encourage using it when cropping is deactivated (i.e. when output_shape=None in BrainGenerator).
    :param enable_90_rotations: (optional) whether to rotate the input by a random angle chosen in {0, 90, 180, 270}.
    This is done regardless of the value of rotation_bounds. If true, a different value is sampled for each dimension.
    :param nonlin_std: (optional) maximum value of the standard deviation of the normal distribution from which we
    sample the small-size SVF. Set to 0 if you wish to completely turn the elastic deformation off.
    :param nonlin_shape_factor: (optional) if nonlin_std is not False, factor between the shapes of the input tensor
    and the shape of the input non-linear tensor.
    :param inter_method: (optional) interpolation method when deforming the input tensor. Can be 'linear', or 'nearest'
    """

    def __init__(self,
                 scaling_bounds=0.15,
                 rotation_bounds=10,
                 shearing_bounds=0.02,
                 translation_bounds=False,
                 enable_90_rotations=False,
                 nonlin_std=4.,
                 nonlin_shape_factor=.0625,
                 inter_method='linear',
                 **kwargs):

        # shape attributes (filled in build() once input shapes are known)
        self.n_inputs = 1
        self.inshape = None
        self.n_dims = None
        self.small_shape = None

        # deformation attributes
        self.scaling_bounds = scaling_bounds
        self.rotation_bounds = rotation_bounds
        self.shearing_bounds = shearing_bounds
        self.translation_bounds = translation_bounds
        self.enable_90_rotations = enable_90_rotations
        self.nonlin_std = nonlin_std
        self.nonlin_shape_factor = nonlin_shape_factor

        # boolean attributes: affine is enabled if any of its components is enabled
        self.apply_affine_trans = (self.scaling_bounds is not False) | (self.rotation_bounds is not False) | \
                                  (self.shearing_bounds is not False) | (self.translation_bounds is not False) | \
                                  self.enable_90_rotations
        self.apply_elastic_trans = self.nonlin_std > 0

        # interpolation methods (reformatted to one method per input in build())
        self.inter_method = inter_method

        super(RandomSpatialDeformation, self).__init__(**kwargs)

    def get_config(self):
        """Return the layer configuration for serialisation."""
        config = super().get_config()
        config["scaling_bounds"] = self.scaling_bounds
        config["rotation_bounds"] = self.rotation_bounds
        config["shearing_bounds"] = self.shearing_bounds
        config["translation_bounds"] = self.translation_bounds
        config["enable_90_rotations"] = self.enable_90_rotations
        config["nonlin_std"] = self.nonlin_std
        config["nonlin_shape_factor"] = self.nonlin_shape_factor
        config["inter_method"] = self.inter_method
        return config

    def build(self, input_shape):
        # accept a single input or a list of inputs; all must share the same spatial shape
        if not isinstance(input_shape, list):
            inputshape = [input_shape]
        else:
            self.n_inputs = len(input_shape)
            inputshape = input_shape
        self.inshape = inputshape[0][1:]
        self.n_dims = len(self.inshape) - 1

        if self.apply_elastic_trans:
            # shape of the small SVF, derived from the input shape and nonlin_shape_factor
            self.small_shape = utils.get_resample_shape(self.inshape[:self.n_dims],
                                                        self.nonlin_shape_factor, self.n_dims)
        else:
            self.small_shape = None

        # one interpolation method per input (e.g. 'nearest' for label maps, 'linear' for images)
        self.inter_method = utils.reformat_to_list(self.inter_method, length=self.n_inputs, dtype='str')

        self.built = True
        super(RandomSpatialDeformation, self).build(input_shape)

    def call(self, inputs, **kwargs):

        # reformat inputs and get its shape; cast everything to float32 for the transformer
        if self.n_inputs < 2:
            inputs = [inputs]
        types = [v.dtype for v in inputs]
        inputs = [tf.cast(v, dtype='float32') for v in inputs]
        batchsize = tf.split(tf.shape(inputs[0]), [1, self.n_dims + 1])[0]

        # initialise list of transforms to apply
        list_trans = list()

        # add affine deformation to inputs list
        if self.apply_affine_trans:
            affine_trans = utils.sample_affine_transform(batchsize,
                                                         self.n_dims,
                                                         self.rotation_bounds,
                                                         self.scaling_bounds,
                                                         self.shearing_bounds,
                                                         self.translation_bounds,
                                                         self.enable_90_rotations)
            list_trans.append(affine_trans)

        # prepare non-linear deformation field and add it to inputs list
        if self.apply_elastic_trans:

            # sample small field from normal distribution of specified std dev
            trans_shape = tf.concat([batchsize, tf.convert_to_tensor(self.small_shape, dtype='int32')], axis=0)
            trans_std = tf.random.uniform((1, 1), maxval=self.nonlin_std)
            elastic_trans = tf.random.normal(trans_shape, stddev=trans_std)

            # reshape this field to half size (for smoother SVF), integrate it, and reshape to full image size
            resize_shape = [max(int(self.inshape[i] / 2), self.small_shape[i]) for i in range(self.n_dims)]
            elastic_trans = nrn_layers.Resize(size=resize_shape, interp_method='linear')(elastic_trans)
            elastic_trans = nrn_layers.VecInt()(elastic_trans)
            elastic_trans = nrn_layers.Resize(size=self.inshape[:self.n_dims], interp_method='linear')(elastic_trans)
            list_trans.append(elastic_trans)

        # apply deformations and return tensors with correct dtype
        if self.apply_affine_trans | self.apply_elastic_trans:
            inputs = [nrn_layers.SpatialTransformer(m)([v] + list_trans) for (m, v) in zip(self.inter_method, inputs)]
        return [tf.cast(v, t) for (t, v) in zip(types, inputs)]
class RandomCrop(Layer):
    """Randomly crop all input tensors to a given shape. This cropping is applied to all channels.
    The input tensors are expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param crop_shape: list with cropping shape in each dimension (excluding batch and channel dimension)
    example:
    if input is a tensor of shape [batchsize, 160, 160, 160, 3],
    output = RandomCrop(crop_shape=[96, 128, 96])(input)
    will yield an output of shape [batchsize, 96, 128, 96, 3] that is obtained by cropping with randomly selected
    cropping indices.
    """

    def __init__(self, crop_shape, **kwargs):
        self.several_inputs = True
        # maximum valid cropping index per dimension (computed in build())
        self.crop_max_val = None
        self.crop_shape = crop_shape
        self.n_dims = len(crop_shape)
        self.list_n_channels = None
        super(RandomCrop, self).__init__(**kwargs)

    def get_config(self):
        """Return the layer configuration for serialisation."""
        config = super().get_config()
        config["crop_shape"] = self.crop_shape
        return config

    def build(self, input_shape):
        if not isinstance(input_shape, list):
            self.several_inputs = False
            inputshape = [input_shape]
        else:
            inputshape = input_shape
        # upper bound (exclusive) for the random crop origin in each dimension
        self.crop_max_val = np.array(np.array(inputshape[0][1:self.n_dims + 1])) - np.array(self.crop_shape)
        self.list_n_channels = [i[-1] for i in inputshape]
        self.built = True
        super(RandomCrop, self).build(input_shape)

    def call(self, inputs, **kwargs):
        # if one input only is provided, performs the cropping directly
        if not self.several_inputs:
            return tf.map_fn(self._single_slice, inputs, dtype=inputs.dtype)
        # otherwise we concatenate all inputs before cropping, so that they are all cropped at the same location
        else:
            types = [v.dtype for v in inputs]
            inputs = tf.concat([tf.cast(v, 'float32') for v in inputs], axis=-1)
            inputs = tf.map_fn(self._single_slice, inputs, dtype=tf.float32)
            inputs = tf.split(inputs, self.list_n_channels, axis=-1)
            return [tf.cast(v, t) for (t, v) in zip(types, inputs)]

    def _single_slice(self, vol):
        """Crop one (un-batched) volume at a random origin; all channels are kept (-1 size)."""
        crop_idx = tf.cast(tf.random.uniform([self.n_dims], 0, np.array(self.crop_max_val), 'float32'), dtype='int32')
        crop_idx = tf.concat([crop_idx, tf.zeros([1], dtype='int32')], axis=0)
        crop_size = tf.convert_to_tensor(self.crop_shape + [-1], dtype='int32')
        return tf.slice(vol, begin=crop_idx, size=crop_size)

    def compute_output_shape(self, input_shape):
        output_shape = [tuple([None] + self.crop_shape + [v]) for v in self.list_n_channels]
        return output_shape if self.several_inputs else output_shape[0]
class RandomFlip(Layer):
    """This function flips the input tensors along the specified axes with a probability of 0.5.
    The input tensors are expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    If specified, this layer can also swap corresponding values, such that the flip tensors stay consistent with the
    native spatial orientation (especially when flipping in the right/left dimension).
    :param flip_axis: integer, or list of integers specifying the dimensions along which to flip. The values exclude the
    batch dimension (e.g. 0 will flip the tensor along the first axis after the batch dimension). Default is None, where
    the tensors can be flipped along any of the axes (except batch and channel axes).
    :param swap_labels: list of booleans to specify whether to swap the values of each input. All the inputs for which
    the values need to be swapped must have a int32 or int64 dtype.
    :param label_list: if swap_labels is True, list of all labels contained in labels. Must be ordered as follows, first
    the neutral labels (i.e. non-sided), then left labels and right labels.
    :param n_neutral_labels: if swap_labels is True, number of non-sided labels
    example 1:
    if input is a tensor of shape (batchsize, 10, 100, 200, 3)
    output = RandomFlip()(input) will randomly flip input along one of the 1st, 2nd, or 3rd axis (i.e. those with shape
    10, 100, 200).
    example 2:
    if input is a tensor of shape (batchsize, 10, 100, 200, 3)
    output = RandomFlip(flip_axis=1)(input) will randomly flip input along the 3rd axis (with shape 100), i.e. the axis
    with index 1 if we don't count the batch axis.
    example 3:
    input = tf.convert_to_tensor(np.array([[1, 0, 0, 0, 0, 0, 0],
                                           [1, 0, 0, 0, 2, 2, 0],
                                           [1, 0, 0, 0, 2, 2, 0],
                                           [1, 0, 0, 0, 2, 2, 0],
                                           [1, 0, 0, 0, 0, 0, 0]]))
    label_list = np.array([0, 1, 2])
    n_neutral_labels = 1
    output = RandomFlip(flip_axis=1, swap_labels=True, label_list=label_list, n_neutral_labels=n_neutral_labels)(input)
    where output will either be equal to input (bear in mind the flipping occurs with a 0.5 probability), or:
    output = [[0, 0, 0, 0, 0, 0, 2],
              [0, 1, 1, 0, 0, 0, 2],
              [0, 1, 1, 0, 0, 0, 2],
              [0, 1, 1, 0, 0, 0, 2],
              [0, 0, 0, 0, 0, 0, 2]]
    Note that the input must have a dtype int32 or int64 for its values to be swapped, otherwise an error will be raised
    example 4:
    if labels is the same as in the input of example 3, and image is a float32 image, then we can swap consistently both
    the labels and the image with:
    labels, image = RandomFlip(flip_axis=1, swap_labels=[True, False], label_list=label_list,
                               n_neutral_labels=n_neutral_labels)([labels, image]])
    Note that the labels must have a dtype int32 or int64 to be swapped, otherwise an error will be raised.
    This doesn't concern the image input, as its values are not swapped.
    """

    def __init__(self, flip_axis=None, swap_labels=False, label_list=None, n_neutral_labels=None, **kwargs):

        # shape attributes (filled in build())
        self.several_inputs = True
        self.n_dims = None
        self.list_n_channels = None

        # axis along which to flip
        self.flip_axis = utils.reformat_to_list(flip_axis)

        # whether to swap labels, and corresponding label list
        self.swap_labels = utils.reformat_to_list(swap_labels)
        self.label_list = label_list
        self.n_neutral_labels = n_neutral_labels
        self.swap_lut = None

        super(RandomFlip, self).__init__(**kwargs)

    def get_config(self):
        """Return the layer configuration for serialisation."""
        config = super().get_config()
        config["flip_axis"] = self.flip_axis
        config["swap_labels"] = self.swap_labels
        config["label_list"] = self.label_list
        config["n_neutral_labels"] = self.n_neutral_labels
        return config

    def build(self, input_shape):
        if not isinstance(input_shape, list):
            self.several_inputs = False
            inputshape = [input_shape]
        else:
            inputshape = input_shape
        self.n_dims = len(inputshape[0][1:-1])
        self.list_n_channels = [i[-1] for i in inputshape]
        self.swap_labels = utils.reformat_to_list(self.swap_labels, length=len(inputshape))

        # create label list with swapped labels
        if any(self.swap_labels):
            assert (self.label_list is not None) & (self.n_neutral_labels is not None), \
                'please provide a label_list, and n_neutral_labels when swapping the values of at least one input'
            n_labels = len(self.label_list)
            if self.n_neutral_labels == n_labels:
                # all labels are neutral: nothing to swap
                self.swap_labels = [False] * len(self.swap_labels)
            else:
                # split label_list into [neutral, left, right] and build a left<->right lookup table
                rl_split = np.split(self.label_list, [self.n_neutral_labels,
                                                       self.n_neutral_labels + int((n_labels-self.n_neutral_labels)/2)])
                label_list_swap = np.concatenate((rl_split[0], rl_split[2], rl_split[1]))
                swap_lut = utils.get_mapping_lut(self.label_list, label_list_swap)
                self.swap_lut = tf.convert_to_tensor(swap_lut, dtype='int32')

        self.built = True
        super(RandomFlip, self).build(input_shape)

    def call(self, inputs, **kwargs):

        # convert inputs to list, and get each input type
        if not self.several_inputs:
            inputs = [inputs]
        types = [v.dtype for v in inputs]

        # sample boolean for each element of the batch (flip with probability 0.5)
        batchsize = tf.split(tf.shape(inputs[0]), [1, self.n_dims + 1])[0]
        rand_flip = K.greater(tf.random.uniform(tf.concat([batchsize, tf.ones(1, dtype='int32')], axis=0), 0, 1), 0.5)

        # swap r/l labels if necessary
        swapped_inputs = list()
        for i in range(len(inputs)):
            if self.swap_labels[i]:
                swapped_inputs.append(tf.map_fn(self._single_swap, [inputs[i], rand_flip], dtype=types[i]))
            else:
                swapped_inputs.append(inputs[i])

        # flip inputs and convert them back to their original type
        inputs = tf.concat([tf.cast(v, 'float32') for v in swapped_inputs], axis=-1)
        inputs = tf.map_fn(self._single_flip, [inputs, rand_flip], dtype=tf.float32)
        inputs = tf.split(inputs, self.list_n_channels, axis=-1)
        return [tf.cast(v, t) for (t, v) in zip(types, inputs)]

    def _single_swap(self, inputs):
        """Swap label values of one volume through the lookup table, if its flip flag is set."""
        return K.switch(inputs[1], tf.gather(self.swap_lut, inputs[0]), inputs[0])

    def _single_flip(self, inputs):
        """Flip one volume along a randomly chosen axis, if its flip flag is set."""
        if self.flip_axis is None:
            flip_axis = tf.random.uniform([1], 0, self.n_dims, dtype='int32')
        else:
            # pick one of the user-specified axes at random
            idx = tf.squeeze(tf.random.uniform([1], 0, len(self.flip_axis), dtype='int32'))
            flip_axis = tf.expand_dims(tf.convert_to_tensor(self.flip_axis, dtype='int32')[idx], axis=0)
        return K.switch(inputs[1], K.reverse(inputs[0], axes=flip_axis), inputs[0])
class SampleConditionalGMM(Layer):
    """This layer generates an image by sampling a Gaussian Mixture Model conditioned on a label map given as input.
    The parameters of the GMM are given as two additional inputs to the layer (means and standard deviations):
    image = SampleConditionalGMM(generation_labels)([label_map, means, stds])
    :param generation_labels: list of all possible label values contained in the input label maps.
    Must be a list or a 1D numpy array of size N, where N is the total number of possible label values.
    Layer inputs:
    label_map: input label map of shape [batchsize, shape_dim1, ..., shape_dimn, n_channel].
    All the values of label_map must be contained in generation_labels, but the input label_map doesn't necessarily have
    to contain all the values in generation_labels.
    means: tensor containing the mean values of all Gaussian distributions of the GMM.
    It must be of shape [batchsize, N, n_channel], and in the same order as generation label,
    i.e. the ith value of generation_labels will be associated to the ith value of means.
    stds: same as means but for the standard deviations of the GMM.
    """

    def __init__(self, generation_labels, **kwargs):
        self.generation_labels = generation_labels
        # the following attributes are filled in build()
        self.n_labels = None
        self.n_channels = None
        self.max_label = None
        self.indices = None
        self.shape = None
        super(SampleConditionalGMM, self).__init__(**kwargs)

    def get_config(self):
        """Return the layer configuration for serialisation."""
        config = super().get_config()
        config["generation_labels"] = self.generation_labels
        return config

    def build(self, input_shape):

        # check n_labels and n_channels
        assert len(input_shape) == 3, 'should have three inputs: labels, means, std devs (in that order).'
        self.n_channels = input_shape[1][-1]
        self.n_labels = len(self.generation_labels)
        assert self.n_labels == input_shape[1][1], 'means should have the same number of values as generation_labels'
        assert self.n_labels == input_shape[2][1], 'stds should have the same number of values as generation_labels'

        # scatter parameters (to build mean/std lut): labels of channel i are offset by i * max_label
        # so a single flat lookup table covers all channels
        self.max_label = np.max(self.generation_labels) + 1
        indices = np.concatenate([self.generation_labels + self.max_label * i for i in range(self.n_channels)], axis=-1)
        self.shape = tf.convert_to_tensor([np.max(indices) + 1], dtype='int32')
        self.indices = tf.convert_to_tensor(utils.add_axis(indices, axis=[0, -1]), dtype='int32')

        self.built = True
        super(SampleConditionalGMM, self).build(input_shape)

    def call(self, inputs, **kwargs):

        # reformat labels and scatter indices (apply the same per-channel offset used in build())
        batch = tf.split(tf.shape(inputs[0]), [1, -1])[0]
        tmp_indices = tf.tile(self.indices, tf.concat([batch, tf.convert_to_tensor([1, 1], dtype='int32')], axis=0))
        labels = tf.concat([tf.cast(inputs[0], dtype='int32') + self.max_label * i for i in range(self.n_channels)], -1)

        # build mean map: scatter the per-label means into a lookup table, then gather per voxel
        means = tf.concat([inputs[1][..., i] for i in range(self.n_channels)], 1)
        tile_shape = tf.concat([batch, tf.convert_to_tensor([1, ], dtype='int32')], axis=0)
        means = tf.tile(tf.expand_dims(tf.scatter_nd(tmp_indices, means, self.shape), 0), tile_shape)
        means_map = tf.map_fn(lambda x: tf.gather(x[0], x[1]), [means, labels], dtype=tf.float32)

        # same for stds
        stds = tf.concat([inputs[2][..., i] for i in range(self.n_channels)], 1)
        stds = tf.tile(tf.expand_dims(tf.scatter_nd(tmp_indices, stds, self.shape), 0), tile_shape)
        stds_map = tf.map_fn(lambda x: tf.gather(x[0], x[1]), [stds, labels], dtype=tf.float32)

        # sample image = mean + std * standard normal noise, per voxel
        return stds_map * tf.random.normal(tf.shape(labels)) + means_map

    def compute_output_shape(self, input_shape):
        return input_shape[0] if (self.n_channels == 1) else tuple(list(input_shape[0][:-1]) + [self.n_channels])
class SampleResolution(Layer):
"""Build a random resolution tensor by sampling a uniform distribution of provided range.
You can use this layer in the following ways:
resolution = SampleConditionalGMM(min_resolution)() in this case resolution will be a tensor of shape (n_dims,),
where n_dims is the length of the min_resolution parameter (provided as a list, see below).
resolution = SampleConditionalGMM(min_resolution)(input), where input is a tensor for which the first dimension
represents the batch_size. In this case resolution will be a tensor of shape (batchsize, n_dims,).
:param min_resolution: list of length n_dims specifying the inferior bounds of the uniform distributions to
sample from for each value.
:param max_res_iso: If not None, all the values of resolution will be equal to the same value, which is randomly
sampled at each minibatch in U(min_resolution, max_res_iso).
:param max_res_aniso: If not None, we first randomly select a direction i in the range [0, n_dims-1], and we sample
a value in the corresponding uniform distribution U(min_resolution[i], max_res_aniso[i]).
The other values of resolution will be set to min_resolution.
:param prob_iso: if both max_res_iso and max_res_aniso are specified, this allows to specify the probability of
sampling an isotropic resolution (therefore using max_res_iso) with respect to anisotropic resolution
(which would use max_res_aniso).
:param prob_min: if not zero, this allows to return with the specified probability an output resolution equal
to min_resolution.
:param return_thickness: if set to True, this layer will also return a thickness value of the same shape as
resolution, which will be sampled independently for each axis from the uniform distribution
U(min_resolution, resolution).
"""
def __init__(self,
min_resolution,
max_res_iso=None,
max_res_aniso=None,
prob_iso=0.1,
prob_min=0.05,
return_thickness=True,
**kwargs):
self.min_res = min_resolution
self.max_res_iso_input = max_res_iso
self.max_res_iso = None
self.max_res_aniso_input = max_res_aniso
self.max_res_aniso = None
self.prob_iso = prob_iso
self.prob_min = prob_min
self.return_thickness = return_thickness
self.n_dims = len(self.min_res)
self.add_batchsize = False
self.min_res_tens = None
super(SampleResolution, self).__init__(**kwargs)
def get_config(self):
config = super().get_config()
config["min_resolution"] = self.min_res
config["max_res_iso"] = self.max_res_iso
config["max_res_aniso"] = self.max_res_aniso
config["prob_iso"] = self.prob_iso
config["prob_min"] = self.prob_min
config["return_thickness"] = self.return_thickness
return config
def build(self, input_shape):
# check maximum resolutions
assert ((self.max_res_iso_input is not None) | (self.max_res_aniso_input is not None)), \
'at least one of maximinum isotropic or anisotropic resolutions must be provided, received none'
# reformat resolutions as numpy arrays
self.min_res = np.array(self.min_res)
if self.max_res_iso_input is not None:
self.max_res_iso = np.array(self.max_res_iso_input)
assert len(self.min_res) == len(self.max_res_iso), \
'min and isotropic max resolution must have the same length, ' \
'had {0} and {1}'.format(self.min_res, self.max_res_iso)
if np.array_equal(self.min_res, self.max_res_iso):
self.max_res_iso = None
if self.max_res_aniso_input is not None:
self.max_res_aniso = np.array(self.max_res_aniso_input)
assert len(self.min_res) == len(self.max_res_aniso), \
'min and anisotopic max resolution must have the same length, ' \
'had {} and {}'.format(self.min_res, self.max_res_aniso)
if np.array_equal(self.min_res, self.max_res_aniso):
self.max_res_aniso = None
# check prob iso
if (self.max_res_iso is not None) & (self.max_res_aniso is not None) & (self.prob_iso == 0):
raise Exception('prob iso is 0 while sampling either isotropic and anisotropic resolutions is enabled')
if input_shape:
self.add_batchsize = True
self.min_res_tens = tf.convert_to_tensor(self.min_res, dtype='float32')
self.built = True
super(SampleResolution, self).build(input_shape)
def call(self, inputs, **kwargs):
if not self.add_batchsize:
shape = [self.n_dims]
dim = tf.random.uniform(shape=(1, 1), minval=0, maxval=self.n_dims, dtype='int32')
mask = tf.tensor_scatter_nd_update(tf.zeros([self.n_dims], dtype='bool'), dim,
tf.convert_to_tensor([True], dtype='bool'))
else:
batch = tf.split(tf.shape(inputs), [1, -1])[0]
tile_shape = tf.concat([batch, tf.convert_to_tensor([1], dtype='int32')], axis=0)
self.min_res_tens = tf.tile(tf.expand_dims(self.min_res_tens, 0), tile_shape)
shape = tf.concat([batch, tf.convert_to_tensor([self.n_dims], dtype='int32')], axis=0)
indices = tf.stack([tf.range(0, batch[0]), tf.random.uniform(batch, 0, self.n_dims, dtype='int32')], 1)
mask = tf.tensor_scatter_nd_update(tf.zeros(shape, dtype='bool'), indices, tf.ones(batch, dtype='bool'))
# return min resolution as tensor if min=max
if (self.max_res_iso is None) & (self.max_res_aniso is None):
new_resolution = self.min_res_tens
# sample isotropic resolution only
elif (self.max_res_iso is not None) & (self.max_res_aniso is None):
new_resolution_iso = tf.random.uniform(shape, minval=self.min_res, maxval=self.max_res_iso)
new_resolution = K.switch(tf.squeeze(K.less(tf.random.uniform([1], 0, 1), self.prob_min)),
self.min_res_tens,
new_resolution_iso)
# sample anisotropic resolution only
elif (self.max_res_iso is None) & (self.max_res_aniso is not None):
new_resolution_aniso = tf.random.uniform(shape, minval=self.min_res, maxval=self.max_res_aniso)
new_resolution = K.switch(tf.squeeze(K.less(tf.random.uniform([1], 0, 1), self.prob_min)),
self.min_res_tens,
tf.where(mask, new_resolution_aniso, self.min_res_tens))
# sample either anisotropic or isotropic resolution
else:
new_resolution_iso = tf.random.uniform(shape, minval=self.min_res, maxval=self.max_res_iso)
new_resolution_aniso = tf.random.uniform(shape, minval=self.min_res, maxval=self.max_res_aniso)
new_resolution = K.switch(tf.squeeze(K.less(tf.random.uniform([1], 0, 1), self.prob_iso)),
new_resolution_iso,
tf.where(mask, new_resolution_aniso, self.min_res_tens))
new_resolution = K.switch(tf.squeeze(K.less(tf.random.uniform([1], 0, 1), self.prob_min)),
self.min_res_tens,
new_resolution)
if self.return_thickness:
return [new_resolution, tf.random.uniform(tf.shape(self.min_res_tens), self.min_res_tens, new_resolution)]
else:
return new_resolution
    def compute_output_shape(self, input_shape):
        """Output is a resolution vector of length n_dims (with a leading batch axis when
        add_batchsize is True); two such tensors when return_thickness is enabled
        (the sampled resolution and the sampled slice thickness)."""
        if self.return_thickness:
            return [(None, self.n_dims)] * 2 if self.add_batchsize else [self.n_dims] * 2
        else:
            return (None, self.n_dims) if self.add_batchsize else self.n_dims
class GaussianBlur(Layer):
    """Applies gaussian blur to an input image.
    The input image is expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param sigma: standard deviation of the blurring kernels to apply. Can be a number, a list of length n_dims, or
    a numpy array.
    :param random_blur_range: (optional) if not None, this introduces a randomness in the blurring kernels, where
    sigma is now multiplied by a coefficient dynamically sampled from a uniform distribution with bounds
    [1/random_blur_range, random_blur_range].
    :param use_mask: (optional) whether a mask of the input will be provided as an additionnal layer input. This is used
    to mask the blurred image, and to correct for edge blurring effects.
    example 1:
    output = GaussianBlur(sigma=0.5)(input) will isotropically blur the input with a gaussian kernel of std 0.5.
    example 2:
    if input is a tensor of shape [batchsize, 10, 100, 200, 2]
    output = GaussianBlur(sigma=[0.5, 1, 10])(input) will blur the input a different gaussian kernel in each dimension.
    example 3:
    output = GaussianBlur(sigma=0.5, random_blur_range=1.15)(input)
    will blur the input a different gaussian kernel in each dimension, as each dimension will be associated with a
    a kernel, whose standard deviation will be uniformly sampled from [0.5/1.15; 0.5*1.15].
    example 4:
    output = GaussianBlur(sigma=0.5, use_mask=True)([input, mask])
    will 1) blur the input a different gaussian kernel in each dimension, 2) mask the blurred image with the provided
    mask, and 3) correct for edge blurring effects. If the provided mask is not of boolean type, it will thresholded
    above positive values.
    """
    def __init__(self, sigma, random_blur_range=None, use_mask=False, **kwargs):
        """Stores the blurring parameters; shape-dependent attributes are set in build()."""
        self.sigma = utils.reformat_to_list(sigma)
        assert np.all(np.array(self.sigma) >= 0), 'sigma should be superior or equal to 0'
        self.use_mask = use_mask
        self.n_dims = None
        self.n_channels = None
        self.blur_range = random_blur_range
        self.stride = None
        self.separable = None
        self.kernels = None
        self.convnd = None
        super(GaussianBlur, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["sigma"] = self.sigma
        config["random_blur_range"] = self.blur_range
        config["use_mask"] = self.use_mask
        return config
    def build(self, input_shape):
        """Infers dimensionality from the input shape and precomputes the blurring kernels."""
        # get shapes (input_shape is a list [image_shape, mask_shape] when use_mask=True)
        if self.use_mask:
            assert len(input_shape) == 2, 'please provide a mask as second layer input when use_mask=True'
            self.n_dims = len(input_shape[0]) - 2
            self.n_channels = input_shape[0][-1]
        else:
            self.n_dims = len(input_shape) - 2
            self.n_channels = input_shape[-1]
        # prepare blurring kernel; separable 1D kernels are used for large sigmas,
        # as a full nD kernel would be too expensive in that case
        self.stride = [1]*(self.n_dims+2)
        self.sigma = utils.reformat_to_list(self.sigma, length=self.n_dims)
        self.separable = np.linalg.norm(np.array(self.sigma)) > 5
        if self.blur_range is None:  # fixed kernels
            self.kernels = l2i_et.gaussian_kernel(self.sigma, separable=self.separable)
        else:
            self.kernels = None
        # prepare convolution (tf.nn.conv1d/2d/3d depending on dimensionality)
        self.convnd = getattr(tf.nn, 'conv%dd' % self.n_dims)
        self.built = True
        super(GaussianBlur, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Blurs the input image (channel-wise), optionally masking it and correcting edge effects."""
        if self.use_mask:
            image = inputs[0]
            mask = tf.cast(inputs[1], 'bool')
        else:
            image = inputs
            mask = None
        # redefine the kernels at each new step when blur_range is activated
        if self.blur_range is not None:
            self.kernels = l2i_et.gaussian_kernel(self.sigma, blur_range=self.blur_range, separable=self.separable)
        if self.separable:
            # apply each 1D kernel in turn (None entries correspond to sigma=0 axes)
            for k in self.kernels:
                if k is not None:
                    image = tf.concat([self.convnd(tf.expand_dims(image[..., n], -1), k, self.stride, 'SAME')
                                       for n in range(self.n_channels)], -1)
                    if self.use_mask:
                        # blur the mask with the same kernel, then divide to correct for the
                        # signal attenuation introduced near mask edges
                        maskb = tf.cast(mask, 'float32')
                        maskb = tf.concat([self.convnd(tf.expand_dims(maskb[..., n], -1), k, self.stride, 'SAME')
                                           for n in range(self.n_channels)], -1)
                        image = image / (maskb + K.epsilon())
                        image = tf.where(mask, image, tf.zeros_like(image))
        else:
            # single nD kernel; skip entirely if all sigmas are zero
            if any(self.sigma):
                image = tf.concat([self.convnd(tf.expand_dims(image[..., n], -1), self.kernels, self.stride, 'SAME')
                                   for n in range(self.n_channels)], -1)
                if self.use_mask:
                    maskb = tf.cast(mask, 'float32')
                    maskb = tf.concat([self.convnd(tf.expand_dims(maskb[..., n], -1), self.kernels, self.stride, 'SAME')
                                       for n in range(self.n_channels)], -1)
                    image = image / (maskb + K.epsilon())
                    image = tf.where(mask, image, tf.zeros_like(image))
        return image
class ImageGradients(Layer):
    """Computes spatial intensity gradients of the input tensor.
    The input is expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param gradient_type: either 'sobel' (separable sobel kernels, any dimensionality) or
    '1-step_diff' (one-step finite differences, 2D/3D only).
    :param return_magnitude: if True, returns the voxel-wise gradient magnitude instead of
    concatenating the per-direction gradients along the channel axis.
    """
    def __init__(self, gradient_type='sobel', return_magnitude=False, **kwargs):
        self.gradient_type = gradient_type
        assert (self.gradient_type == 'sobel') | (self.gradient_type == '1-step_diff'), \
            'gradient_type should be either sobel or 1-step_diff, had %s' % self.gradient_type
        # shape
        self.n_dims = 0
        self.shape = None
        self.n_channels = 0
        # convolution params if sobel diff
        self.stride = None
        self.kernels = None
        self.convnd = None
        self.return_magnitude = return_magnitude
        super(ImageGradients, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["gradient_type"] = self.gradient_type
        config["return_magnitude"] = self.return_magnitude
        return config
    def build(self, input_shape):
        """Stores shape info and, for sobel gradients, precomputes the separable kernels."""
        # get shapes
        self.n_dims = len(input_shape) - 2
        self.shape = input_shape[1:]
        self.n_channels = input_shape[-1]
        # prepare kernel if sobel gradients
        if self.gradient_type == 'sobel':
            self.kernels = l2i_et.sobel_kernels(self.n_dims)
            self.stride = [1] * (self.n_dims + 2)
            self.convnd = getattr(tf.nn, 'conv%dd' % self.n_dims)
        else:
            self.kernels = self.convnd = self.stride = None
        self.built = True
        super(ImageGradients, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Returns the per-direction gradients (or their magnitude), same spatial shape as input."""
        image = inputs
        batchsize = tf.split(tf.shape(inputs), [1, -1])[0]
        gradients = list()
        # sobel method
        if self.gradient_type == 'sobel':
            # get sobel gradients in each direction
            for n in range(self.n_dims):
                gradient = image
                # apply 1D kernel in each direction (sobel kernels are separable), instead of applying a nD kernel
                # NOTE(review): the comprehension below reuses the name n for the channel index;
                # the outer n (direction index) is unaffected since comprehensions have their own scope
                for k in self.kernels[n]:
                    gradient = tf.concat([self.convnd(tf.expand_dims(gradient[..., n], -1), k, self.stride, 'SAME')
                                          for n in range(self.n_channels)], -1)
                gradients.append(gradient)
        # 1-step method, only supports 2 and 3D
        else:
            # get 1-step diff
            if self.n_dims == 2:
                gradients.append(image[:, 1:, :, :] - image[:, :-1, :, :])  # dx
                gradients.append(image[:, :, 1:, :] - image[:, :, :-1, :])  # dy
            elif self.n_dims == 3:
                gradients.append(image[:, 1:, :, :, :] - image[:, :-1, :, :, :])  # dx
                gradients.append(image[:, :, 1:, :, :] - image[:, :, :-1, :, :])  # dy
                gradients.append(image[:, :, :, 1:, :] - image[:, :, :, :-1, :])  # dz
            else:
                raise Exception('ImageGradients only support 2D or 3D tensors for 1-step diff, had: %dD' % self.n_dims)
            # pad with zeros to return tensors of the same shape as input
            # (1-step diff loses one voxel along the differentiated axis)
            for i in range(self.n_dims):
                tmp_shape = list(self.shape)
                tmp_shape[i] = 1
                zeros = tf.zeros(tf.concat([batchsize, tf.convert_to_tensor(tmp_shape, dtype='int32')], 0), image.dtype)
                gradients[i] = tf.concat([gradients[i], zeros], axis=i + 1)
        # compute total gradient magnitude if necessary, or concatenate different gradients along the channel axis
        if self.return_magnitude:
            gradients = tf.sqrt(tf.reduce_sum(tf.square(tf.stack(gradients, axis=-1)), axis=-1))
        else:
            gradients = tf.concat(gradients, axis=-1)
        return gradients
    def compute_output_shape(self, input_shape):
        """Channel axis becomes n_dims when concatenating gradients; unchanged for magnitude."""
        if not self.return_magnitude:
            input_shape = list(input_shape)
            input_shape[-1] = self.n_dims
        return tuple(input_shape)
class DynamicGaussianBlur(Layer):
    """Applies gaussian blur to an input image, where the standard deviation of the blurring kernel is provided as a
    layer input, which enables to perform dynamic blurring (i.e. the blurring kernel can vary at each minibatch).
    :param max_sigma: maximum value of the standard deviation that will be provided as input. This is used to compute
    the size of the blurring kernels. It must be provided as a list of length n_dims.
    :param random_blur_range: (optional) if not None, this introduces a randomness in the blurring kernels, where
    sigma is now multiplied by a coefficient dynamically sampled from a uniform distribution with bounds
    [1/random_blur_range, random_blur_range].
    example:
    blurred_image = DynamicGaussianBlur(max_sigma=[5.]*3, random_blurring_range=1.15)([image, sigma])
    will return a blurred version of image, where the standard deviation of each dimension (given as an tensor, and with
    values lower than 5 for each axis) is multiplied by a random coefficient uniformly sampled from [1/1.15; 1.15].
    """
    def __init__(self, max_sigma, random_blur_range=None, **kwargs):
        """Stores blurring parameters; shape-dependent attributes are set in build()."""
        self.max_sigma = max_sigma
        self.n_dims = None
        self.n_channels = None
        self.convnd = None
        self.blur_range = random_blur_range
        self.separable = None
        super(DynamicGaussianBlur, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["max_sigma"] = self.max_sigma
        config["random_blur_range"] = self.blur_range
        return config
    def build(self, input_shape):
        """Infers dimensionality and picks the matching tf.nn.conv1d/2d/3d operator."""
        assert len(input_shape) == 2, 'sigma should be provided as an input tensor for dynamic blurring'
        self.n_dims = len(input_shape[0]) - 2
        self.n_channels = input_shape[0][-1]
        self.convnd = getattr(tf.nn, 'conv%dd' % self.n_dims)
        self.max_sigma = utils.reformat_to_list(self.max_sigma, length=self.n_dims)
        # use separable 1D kernels for large sigmas, as a full nD kernel would be too expensive
        self.separable = np.linalg.norm(np.array(self.max_sigma)) > 5
        self.built = True
        super(DynamicGaussianBlur, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Blurs inputs[0] with kernels built from the per-example sigmas in inputs[-1]."""
        image = inputs[0]
        sigma = inputs[-1]
        kernels = l2i_et.gaussian_kernel(sigma, self.max_sigma, self.blur_range, self.separable)
        # kernels differ per example, so the convolution is mapped over the batch
        if self.separable:
            for kernel in kernels:
                image = tf.map_fn(self._single_blur, [image, kernel], dtype=tf.float32)
        else:
            image = tf.map_fn(self._single_blur, [image, kernels], dtype=tf.float32)
        return image
    def _single_blur(self, inputs):
        """Blurs one example (inputs[0]) with one kernel (inputs[1]), channel by channel."""
        if self.n_channels > 1:
            split_channels = tf.split(inputs[0], [1] * self.n_channels, axis=-1)
            blurred_channel = list()
            for channel in split_channels:
                blurred = self.convnd(tf.expand_dims(channel, 0), inputs[1], [1] * (self.n_dims + 2), padding='SAME')
                blurred_channel.append(tf.squeeze(blurred, axis=0))
            output = tf.concat(blurred_channel, -1)
        else:
            output = self.convnd(tf.expand_dims(inputs[0], 0), inputs[1], [1] * (self.n_dims + 2), padding='SAME')
            output = tf.squeeze(output, axis=0)
        return output
class MimicAcquisition(Layer):
    """
    Layer that takes an image as input, and simulates data that has been acquired at low resolution.
    The output is obtained by resampling the input twice:
    - first at a resolution given as an input (i.e. the "acquisition" resolution),
    - then at the output resolution (specified output shape).
    The input tensor is expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param volume_res: resolution of the provided inputs. Must be a 1-D numpy array with n_dims elements.
    :param min_subsample_res: lower bound of the acquisition resolutions to mimic (i.e. the input resolution must have
    values higher than min-subseample_res).
    :param resample_shape: shape of the output tensor
    :param build_dist_map: whether to return distance maps as outputs. These indicate the distance between each voxel
    and the nearest non-interpolated voxel (during the second resampling).
    example 1:
    im_res = [1., 1., 1.]
    low_res = [1., 1., 3.]
    res = tf.convert_to_tensor([1., 1., 4.5])
    image is a tensor of shape (None, 256, 256, 256, 3)
    resample_shape = [256, 256, 256]
    output = MimicAcquisition(im_res, low_res, resample_shape)([image, res])
    output will be a tensor of shape (None, 256, 256, 256, 3), obtained by downsampling image to [1., 1., 4.5].
    and re-upsampling it at initial resolution (because resample_shape is equal to the input shape). In this example all
    examples of the batch will be downsampled to the same resolution (because res has no batch dimension).
    Note that the provided res must have higher values than min_low_res.
    example 2:
    im_res = [1., 1., 1.]
    min_low_res = [1., 1., 1.]
    res is a tensor of shape (None, 3), obtained for example by using the SampleResolution layer (see above).
    image is a tensor of shape (None, 256, 256, 256, 1)
    resample_shape = [128, 128, 128]
    output = MimicAcquisition(im_res, low_res, resample_shape)([image, res])
    output will be a tensor of shape (None, 128, 128, 128, 1), obtained by downsampling each examples of the batch to
    the matching resolution in res, and resampling them all to half the initial resolution.
    Note that the provided res must have higher values than min_low_res.
    """
    def __init__(self, volume_res, min_subsample_res, resample_shape, build_dist_map=False, noise_std=0, **kwargs):
        """Stores resolutions/shapes; the resampling meshgrids are precomputed in build()."""
        # resolutions and dimensions
        self.volume_res = volume_res
        self.min_subsample_res = min_subsample_res
        self.noise_std = noise_std
        self.n_dims = len(self.volume_res)
        self.n_channels = None
        self.add_batchsize = None
        # input and output shapes
        self.inshape = None
        self.resample_shape = resample_shape
        # meshgrids for resampling
        self.down_grid = None
        self.up_grid = None
        # whether to return a map indicating the distance from the interpolated voxels, to acquired ones.
        self.build_dist_map = build_dist_map
        super(MimicAcquisition, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["volume_res"] = self.volume_res
        config["min_subsample_res"] = self.min_subsample_res
        config["noise_std"] = self.noise_std
        config["resample_shape"] = self.resample_shape
        config["build_dist_map"] = self.build_dist_map
        return config
    def build(self, input_shape):
        """Precomputes the down/up-sampling meshgrids from the input and output shapes."""
        # set up input shape and acquisition shape
        self.inshape = input_shape[0][1:]
        self.n_channels = input_shape[0][-1]
        # the resolution input may or may not carry a batch dimension
        self.add_batchsize = False if (input_shape[1][0] is None) else True
        down_tensor_shape = np.int32(np.array(self.inshape[:-1]) * self.volume_res / self.min_subsample_res)
        # build interpolation meshgrids
        self.down_grid = tf.expand_dims(tf.stack(nrn_utils.volshape_to_ndgrid(down_tensor_shape), -1), axis=0)
        self.up_grid = tf.expand_dims(tf.stack(nrn_utils.volshape_to_ndgrid(self.resample_shape), -1), axis=0)
        self.built = True
        super(MimicAcquisition, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Downsamples inputs[0] to the resolution inputs[1], then upsamples to resample_shape."""
        # sort inputs
        assert len(inputs) == 2, 'inputs must have two items, the tensor to resample, and the downsampling resolution'
        vol = inputs[0]
        subsample_res = tf.cast(inputs[1], dtype='float32')
        vol = K.reshape(vol, [-1, *self.inshape])  # necessary for multi_gpu models
        batchsize = tf.split(tf.shape(vol), [1, -1])[0]
        tile_shape = tf.concat([batchsize, tf.ones([1], dtype='int32')], 0)
        # get downsampling and upsampling factors
        if self.add_batchsize:
            subsample_res = tf.tile(tf.expand_dims(subsample_res, 0), tile_shape)
        down_shape = tf.cast(tf.convert_to_tensor(np.array(self.inshape[:-1]) * self.volume_res, dtype='float32') /
                             subsample_res, dtype='int32')
        down_zoom_factor = tf.cast(down_shape / tf.convert_to_tensor(self.inshape[:-1]), dtype='float32')
        up_zoom_factor = tf.cast(tf.convert_to_tensor(self.resample_shape, dtype='int32') / down_shape, dtype='float32')
        # downsample: scale the sampling grid, clip it to the valid range, then nearest-neighbour interpolate
        down_loc = tf.tile(self.down_grid, tf.concat([batchsize, tf.ones([self.n_dims + 1], dtype='int32')], 0))
        down_loc = tf.cast(down_loc, 'float32') / l2i_et.expand_dims(down_zoom_factor, axis=[1] * self.n_dims)
        inshape_tens = tf.tile(tf.expand_dims(tf.convert_to_tensor(self.inshape[:-1]), 0), tile_shape)
        inshape_tens = l2i_et.expand_dims(inshape_tens, axis=[1] * self.n_dims)
        down_loc = K.clip(down_loc, 0., tf.cast(inshape_tens, 'float32'))
        vol = tf.map_fn(self._single_down_interpn, [vol, down_loc], tf.float32)
        # add noise with predefined probability
        if self.noise_std > 0:
            sample_shape = tf.concat([batchsize, tf.ones([self.n_dims], dtype='int32'),
                                      self.n_channels * tf.ones([1], dtype='int32')], 0)
            vol += tf.random.normal(tf.shape(vol), stddev=tf.random.uniform(sample_shape, maxval=self.noise_std))
        # upsample with linear interpolation
        up_loc = tf.tile(self.up_grid, tf.concat([batchsize, tf.ones([self.n_dims + 1], dtype='int32')], axis=0))
        up_loc = tf.cast(up_loc, 'float32') / l2i_et.expand_dims(up_zoom_factor, axis=[1] * self.n_dims)
        vol = tf.map_fn(self._single_up_interpn, [vol, up_loc], tf.float32)
        # return upsampled volume
        if not self.build_dist_map:
            return vol
        # return upsampled volumes with distance maps
        else:
            # get grid points
            floor = tf.math.floor(up_loc)
            ceil = tf.math.ceil(up_loc)
            # get distances of every voxel to higher and lower grid points for every dimension
            f_dist = up_loc - floor
            c_dist = ceil - up_loc
            # keep minimum 1d distances, and compute 3d distance to nearest grid point
            dist = tf.math.minimum(f_dist, c_dist) * l2i_et.expand_dims(subsample_res, axis=[1] * self.n_dims)
            dist = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(dist), axis=-1, keepdims=True))
            return [vol, dist]
    @staticmethod
    def _single_down_interpn(inputs):
        """Nearest-neighbour interpolation of one example (inputs[0]) at locations inputs[1]."""
        return nrn_utils.interpn(inputs[0], inputs[1], interp_method='nearest')
    @staticmethod
    def _single_up_interpn(inputs):
        """Linear interpolation of one example (inputs[0]) at locations inputs[1]."""
        return nrn_utils.interpn(inputs[0], inputs[1], interp_method='linear')
    def compute_output_shape(self, input_shape):
        """Output has shape [None, *resample_shape, n_channels]; doubled when distance maps are returned."""
        output_shape = tuple([None] + self.resample_shape + [input_shape[0][-1]])
        return [output_shape] * 2 if self.build_dist_map else output_shape
class BiasFieldCorruption(Layer):
    """This layer applies a smooth random bias field to the input by applying the following steps:
    1) we first sample a value for the standard deviation of a centred normal distribution
    2) a small-size SVF is sampled from this normal distribution
    3) the small SVF is then resized with trilinear interpolation to image size
    4) it is rescaled to positive values by taking the voxel-wise exponential
    5) it is multiplied to the input tensor.
    The input tensor is expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param bias_field_std: maximum value of the standard deviation sampled in 1 (it will be sampled from the range
    [0, bias_field_std])
    :param bias_shape_factor: ratio between the shape of the input tensor and the shape of the sampled SVF.
    :param same_bias_for_all_channels: whether to apply the same bias field to all the channels of the input tensor.
    """
    def __init__(self, bias_field_std=.5, bias_shape_factor=.025, same_bias_for_all_channels=False, **kwargs):
        """Stores the bias-field parameters; sampling shapes are derived in build()."""
        # input shape
        self.several_inputs = False
        self.inshape = None
        self.n_dims = None
        self.n_channels = None
        # sampling shape
        self.std_shape = None
        self.small_bias_shape = None
        # bias field parameters
        self.bias_field_std = bias_field_std
        self.bias_shape_factor = bias_shape_factor
        self.same_bias_for_all_channels = same_bias_for_all_channels
        super(BiasFieldCorruption, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["bias_field_std"] = self.bias_field_std
        config["bias_shape_factor"] = self.bias_shape_factor
        config["same_bias_for_all_channels"] = self.same_bias_for_all_channels
        return config
    def build(self, input_shape):
        """Computes the shapes used to sample the std dev and the small bias field."""
        # input shape (a list of inputs is supported, all corrupted with the same field)
        if isinstance(input_shape, list):
            self.several_inputs = True
            self.inshape = input_shape
        else:
            self.inshape = [input_shape]
        self.n_dims = len(self.inshape[0]) - 2
        self.n_channels = self.inshape[0][-1]
        # sampling shapes
        self.std_shape = [1] * (self.n_dims + 1)
        self.small_bias_shape = utils.get_resample_shape(self.inshape[0][1:self.n_dims + 1], self.bias_shape_factor, 1)
        if not self.same_bias_for_all_channels:
            # sample a separate field per channel
            self.std_shape[-1] = self.n_channels
            self.small_bias_shape[-1] = self.n_channels
        self.built = True
        super(BiasFieldCorruption, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Multiplies every input tensor by the same randomly sampled, smooth bias field."""
        if not self.several_inputs:
            inputs = [inputs]
        if self.bias_field_std > 0:
            # sampling shapes
            batchsize = tf.split(tf.shape(inputs[0]), [1, -1])[0]
            std_shape = tf.concat([batchsize, tf.convert_to_tensor(self.std_shape, dtype='int32')], 0)
            bias_shape = tf.concat([batchsize, tf.convert_to_tensor(self.small_bias_shape, dtype='int32')], axis=0)
            # sample small bias field (std dev itself is sampled uniformly in [0, bias_field_std])
            bias_field = tf.random.normal(bias_shape, stddev=tf.random.uniform(std_shape, maxval=self.bias_field_std))
            # resize bias field and take exponential (to make it strictly positive)
            bias_field = nrn_layers.Resize(size=self.inshape[0][1:self.n_dims + 1], interp_method='linear')(bias_field)
            bias_field = tf.math.exp(bias_field)
            return [tf.math.multiply(bias_field, v) for v in inputs]
        else:
            return inputs
class IntensityAugmentation(Layer):
    """This layer enables to augment the intensities of the input tensor, as well as to apply min_max normalisation.
    The following steps are applied (all are optional):
    1) white noise corruption, with a randomly sampled std dev.
    2) clip the input between two values
    3) min-max normalisation
    4) gamma augmentation (i.e. voxel-wise exponentiation by a randomly sampled power)
    The input tensor is expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param noise_std: maximum value of the standard deviation of the Gaussian white noise used in 1 (it will be sampled
    from the range [0, noise_std]). Set to 0 to skip this step.
    :param clip: clip the input tensor between the given values. Can either be: a number (in which case we clip between
    0 and the given value), or a list or a numpy array with two elements. Default is 0, where no clipping occurs.
    :param normalise: whether to apply min-max normalisation, to normalise between 0 and 1. Default is True.
    :param norm_perc: percentiles of the sorted intensity values to take for robust normalisation. Can either be:
    a number (in which case the robust minimum is the provided percentile of sorted values, and the maximum is the
    1 - norm_perc percentile), or a list/numpy array of 2 elements (percentiles for the minimum and maximum values).
    The minimum and maximum values are computed separately for each channel if separate_channels is True.
    Default is 0, where we simply take the minimum and maximum values.
    :param gamma_std: standard deviation of the normal distribution from which we sample gamma (in log domain).
    Default is 0, where no gamma augmentation occurs.
    :param separate_channels: whether to augment all channels separately. Default is True.
    """
    def __init__(self, noise_std=0, clip=0, normalise=True, norm_perc=0, gamma_std=0, separate_channels=True,
                 **kwargs):
        """Stores the augmentation parameters; derived shapes are computed in build()."""
        # shape attributes
        self.n_dims = None
        self.n_channels = None
        self.flatten_shape = None
        self.expand_minmax_dim = None
        self.one = None
        # inputs
        self.noise_std = noise_std
        self.clip = clip
        self.clip_values = None
        self.normalise = normalise
        self.norm_perc = norm_perc
        self.perc = None
        self.gamma_std = gamma_std
        self.separate_channels = separate_channels
        super(IntensityAugmentation, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["noise_std"] = self.noise_std
        config["clip"] = self.clip
        config["normalise"] = self.normalise
        config["norm_perc"] = self.norm_perc
        config["gamma_std"] = self.gamma_std
        config["separate_channels"] = self.separate_channels
        return config
    def build(self, input_shape):
        """Normalises the clip/percentile parameters and precomputes shape constants."""
        self.n_dims = len(input_shape) - 2
        self.n_channels = input_shape[-1]
        # number of voxels over which to compute the robust min/max percentiles
        self.flatten_shape = np.prod(np.array(input_shape[1:-1]))
        self.flatten_shape = self.flatten_shape * self.n_channels if not self.separate_channels else self.flatten_shape
        self.expand_minmax_dim = self.n_dims if self.separate_channels else self.n_dims + 1
        self.one = tf.ones([1], dtype='int32')
        if self.clip:
            # a single number means "clip between 0 and that number"
            self.clip_values = utils.reformat_to_list(self.clip)
            self.clip_values = self.clip_values if len(self.clip_values) == 2 else [0, self.clip_values[0]]
        else:
            self.clip_values = None
        if self.norm_perc:
            # a single number p means percentiles [p, 1 - p]
            self.perc = utils.reformat_to_list(self.norm_perc)
            self.perc = self.perc if len(self.perc) == 2 else [self.perc[0], 1 - self.perc[0]]
        else:
            self.perc = None
        self.built = True
        super(IntensityAugmentation, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Applies (in order): noise corruption, clipping, min-max normalisation, gamma augmentation."""
        # prepare shape for sampling the noise and gamma std dev (depending on whether we augment channels separately)
        batchsize = tf.split(tf.shape(inputs), [1, -1])[0]
        if (self.noise_std > 0) | (self.gamma_std > 0):
            sample_shape = tf.concat([batchsize, tf.ones([self.n_dims], dtype='int32')], 0)
            if self.separate_channels:
                sample_shape = tf.concat([sample_shape, self.n_channels * self.one], 0)
            else:
                sample_shape = tf.concat([sample_shape, self.one], 0)
        else:
            sample_shape = None
        # add noise
        if self.noise_std > 0:
            noise_stddev = tf.random.uniform(sample_shape, maxval=self.noise_std)
            if self.separate_channels:
                noise = tf.random.normal(tf.shape(inputs), stddev=noise_stddev)
            else:
                # sample one noise field and tile it across channels
                noise = tf.random.normal(tf.shape(tf.split(inputs, [1, -1], -1)[0]), stddev=noise_stddev)
                noise = tf.tile(noise, tf.convert_to_tensor([1] * (self.n_dims + 1) + [self.n_channels]))
            inputs = inputs + noise
        # clip images to given values
        if self.clip_values is not None:
            inputs = K.clip(inputs, self.clip_values[0], self.clip_values[1])
        # normalise
        if self.normalise:
            # define robust min and max by sorting values and taking percentile
            if self.perc is not None:
                if self.separate_channels:
                    shape = tf.concat([batchsize, self.flatten_shape * self.one, self.n_channels * self.one], 0)
                else:
                    shape = tf.concat([batchsize, self.flatten_shape * self.one], 0)
                intensities = tf.sort(tf.reshape(inputs, shape), axis=1)
                m = intensities[:, max(int(self.perc[0] * self.flatten_shape), 0), ...]
                M = intensities[:, min(int(self.perc[1] * self.flatten_shape), self.flatten_shape - 1), ...]
            # simple min and max
            else:
                m = K.min(inputs, axis=list(range(1, self.expand_minmax_dim + 1)))
                M = K.max(inputs, axis=list(range(1, self.expand_minmax_dim + 1)))
            # normalise
            m = l2i_et.expand_dims(m, axis=[1] * self.expand_minmax_dim)
            M = l2i_et.expand_dims(M, axis=[1] * self.expand_minmax_dim)
            inputs = tf.clip_by_value(inputs, m, M)
            inputs = (inputs - m) / (M - m + K.epsilon())
        # apply voxel-wise exponentiation (gamma is sampled in log domain, hence the exp)
        if self.gamma_std > 0:
            inputs = tf.math.pow(inputs, tf.math.exp(tf.random.normal(sample_shape, stddev=self.gamma_std)))
        return inputs
class DiceLoss(Layer):
    """Computes the (soft) Dice loss between two tensors. These tensors are expected to 1) have the
    same shape, and 2) be probabilistic, i.e. they must have the same shape
    [batchsize, size_dim1, ..., size_dimN, n_labels] where n_labels is the number of labels for
    which we compute the Dice loss."""
    def __init__(self, enable_checks=True, **kwargs):
        # enable_checks renormalises both inputs so they sum to one over the label axis;
        # disabling this is useful to, e.g., use incomplete label maps
        self.inshape = None
        self.enable_checks = enable_checks
        super(DiceLoss, self).__init__(**kwargs)
    def build(self, input_shape):
        assert len(input_shape) == 2, 'DiceLoss expects 2 inputs to compute the Dice loss.'
        assert input_shape[0] == input_shape[1], 'the two inputs must have the same shape.'
        self.inshape = input_shape[0][1:]
        self.built = True
        super(DiceLoss, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Returns the scalar mean (1 - Dice) over labels and batch."""
        gt, pred = inputs[0], inputs[1]
        # make sure tensors are probabilistic (sum to 1 over the label axis)
        if self.enable_checks:
            gt = K.clip(gt / tf.math.reduce_sum(gt, axis=-1, keepdims=True), 0, 1)
            pred = K.clip(pred / tf.math.reduce_sum(pred, axis=-1, keepdims=True), 0, 1)
        # soft Dice per label: reduce over all spatial axes, keep batch and label axes
        reduce_axes = list(range(1, len(self.inshape)))
        numerator = tf.math.reduce_sum(2 * gt * pred, axis=reduce_axes)
        denominator = tf.math.reduce_sum(tf.math.square(gt) + tf.math.square(pred) + tf.keras.backend.epsilon(),
                                         axis=reduce_axes)
        return K.mean(1 - numerator / denominator)
    def compute_output_shape(self, input_shape):
        return [[]]
class WeightedL2Loss(Layer):
    """This layer computes a L2 loss weighted by a specified factor between two tensors.
    These tensors are expected to have the same shape [batchsize, size_dim1, ..., size_dimN, n_labels]
    where n_labels is the number of labels for which we compute the loss.
    Importantly, the first input tensor is the GT, whereas the second is the prediction."""
    def __init__(self, target_value, background_weight=1e-4, **kwargs):
        # value the prediction should reach for foreground voxels (GT is mapped to +/- target_value)
        self.target_value = target_value
        # relative weight given to background voxels (where GT channel 0 equals 1)
        self.background_weight = background_weight
        self.n_labels = None
        super(WeightedL2Loss, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["target_value"] = self.target_value
        config["background_weight"] = self.background_weight
        return config
    def build(self, input_shape):
        # fix: the assert messages previously referred to DiceLoss (copy-paste error),
        # which made failures of this layer misleading to debug
        assert len(input_shape) == 2, 'WeightedL2Loss expects 2 inputs to compute the loss.'
        assert input_shape[0] == input_shape[1], 'the two inputs must have the same shape.'
        self.n_labels = input_shape[0][-1]
        self.built = True
        super(WeightedL2Loss, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Returns the scalar weighted L2 loss between GT (inputs[0]) and prediction (inputs[1])."""
        gt = inputs[0]
        pred = inputs[1]
        # background voxels (GT channel 0) get weight background_weight, others ~1
        weights = tf.expand_dims(1 - gt[..., 0] + self.background_weight, -1)
        # GT in [0, 1] is rescaled to [-target_value, target_value] before the squared error
        return K.sum(weights * K.square(pred - self.target_value * (2 * gt - 1))) / (K.sum(weights) * self.n_labels)
    def compute_output_shape(self, input_shape):
        return [[]]
class ResetValuesToZero(Layer):
    """This layer enables to reset given values to 0 within the input tensors.
    :param values: list of values to be reset to 0.
    example:
    input = tf.convert_to_tensor(np.array([[1, 0, 2, 2, 2, 2, 0],
                                           [1, 3, 3, 3, 3, 3, 3],
                                           [1, 0, 0, 0, 4, 4, 4]]))
    values = [1, 3]
    ResetValuesToZero(values)(input)
    >> [[0, 0, 2, 2, 2, 2, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 4, 4, 4]]
    """
    def __init__(self, values, **kwargs):
        assert values is not None, 'please provide correct list of values, received None'
        # the values to reset are converted to a tensor once, in build()
        self.values = utils.reformat_to_list(values)
        self.values_tens = None
        self.n_values = len(values)
        super(ResetValuesToZero, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["values"] = self.values
        return config
    def build(self, input_shape):
        self.values_tens = tf.convert_to_tensor(self.values)
        self.built = True
        super(ResetValuesToZero, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Zeroes out every occurrence of each target value, one value at a time."""
        targets = tf.cast(self.values_tens, dtype=inputs.dtype)
        result = inputs
        for idx in range(self.n_values):
            result = tf.where(tf.equal(result, targets[idx]), tf.zeros_like(result), result)
        return result
class ConvertLabels(Layer):
    """Convert all labels in a tensor by the corresponding given set of values.
    labels_converted = ConvertLabels(source_values, dest_values)(labels).
    labels must be an int32 tensor, and labels_converted will also be int32.
    :param source_values: list of all the possible values in labels. Must be a list or a 1D numpy array.
    :param dest_values: list of all the target label values. Must be ordered the same as source values:
    labels[labels == source_values[i]] = dest_values[i].
    If None (default), dest_values is equal to [0, ..., N-1], where N is the total number of values in source_values,
    which enables to remap label maps to [0, ..., N-1].
    """
    def __init__(self, source_values, dest_values=None, **kwargs):
        # the mapping is materialised as a lookup table (self.lut) at build time
        self.source_values = source_values
        self.dest_values = dest_values
        self.lut = None
        super(ConvertLabels, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config.update({"source_values": self.source_values, "dest_values": self.dest_values})
        return config
    def build(self, input_shape):
        # build a dense LUT mapping each possible source value to its destination value
        mapping = utils.get_mapping_lut(self.source_values, dest=self.dest_values)
        self.lut = tf.convert_to_tensor(mapping, dtype='int32')
        self.built = True
        super(ConvertLabels, self).build(input_shape)
    def call(self, inputs, **kwargs):
        # index the LUT with the (int-cast) input labels
        int_labels = tf.cast(inputs, dtype='int32')
        return tf.gather(self.lut, int_labels)
class PadAroundCentre(Layer):
    """Pad the input tensor to the specified shape with the given value.
    The input tensor is expected to have shape [batchsize, shape_dim1, ..., shape_dimn, channel].
    :param pad_margin: margin to use for padding. The tensor will be padded by the provided margin on each side.
    Can either be a number (all axes padded with the same margin), or a list/numpy array of length n_dims.
    example: if tensor is of shape [batch, x, y, z, n_channels] and margin=10, then the padded tensor will be of
    shape [batch, x+2*10, y+2*10, z+2*10, n_channels].
    :param pad_shape: shape to pad the tensor to. Can either be a number (all axes padded to the same shape), or a
    list/numpy array of length n_dims.
    :param value: value to pad the tensors with. Default is 0.
    """
    def __init__(self, pad_margin=None, pad_shape=None, value=0, **kwargs):
        """Stores the padding specification; the padding tensor is built in build()."""
        self.pad_margin = pad_margin
        self.pad_shape = pad_shape
        self.value = value
        self.pad_margin_tens = None
        self.pad_shape_tens = None
        self.n_dims = None
        super(PadAroundCentre, self).__init__(**kwargs)
    def get_config(self):
        """Returns the constructor arguments so the layer can be serialised/recreated."""
        config = super().get_config()
        config["pad_margin"] = self.pad_margin
        config["pad_shape"] = self.pad_shape
        config["value"] = self.value
        return config
    def build(self, input_shape):
        """Precomputes the before/after padding margins for every axis."""
        # number of spatial dimensions (input is [batch, *spatial, channels])
        self.n_dims = len(input_shape) - 2
        # zero-out batch and channel entries so that no padding is applied on those axes
        # NOTE(review): this mutates the caller's input_shape list in place — presumably intended
        input_shape[0] = 0
        input_shape[0 - 1] = 0
        if self.pad_margin is not None:
            assert self.pad_shape is None, 'please do not provide a padding shape and margin at the same time.'
            # reformat padding margins into a (n_dims+2, 2) array of before/after margins
            pad = np.transpose(np.array([[0] + utils.reformat_to_list(self.pad_margin, self.n_dims) + [0]] * 2))
            self.pad_margin_tens = tf.convert_to_tensor(pad, dtype='int32')
        elif self.pad_shape is not None:
            assert self.pad_margin is None, 'please do not provide a padding shape and margin at the same time.'
            # target shape, never smaller than the current shape
            tensor_shape = tf.cast(tf.convert_to_tensor(input_shape), 'int32')
            self.pad_shape_tens = np.array([0] + utils.reformat_to_list(self.pad_shape, length=self.n_dims) + [0])
            self.pad_shape_tens = tf.convert_to_tensor(self.pad_shape_tens, dtype='int32')
            self.pad_shape_tens = tf.math.maximum(tensor_shape, self.pad_shape_tens)
            # fix: '/' on int32 tensors performs true division and yields a float tensor, which
            # then fails when subtracted from the int32 tensors below and is invalid as tf.pad
            # paddings. Cast back to int32 (truncation equals floor here, as values are >= 0).
            min_margins = tf.cast((self.pad_shape_tens - tensor_shape) / 2, 'int32')
            max_margins = self.pad_shape_tens - tensor_shape - min_margins
            self.pad_margin_tens = tf.stack([min_margins, max_margins], axis=-1)
        else:
            raise Exception('please either provide a padding shape or a padding margin.')
        self.built = True
        super(PadAroundCentre, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Pads the input around its centre with self.value, using the precomputed margins."""
        return tf.pad(inputs, self.pad_margin_tens, mode='CONSTANT', constant_values=self.value)
class MaskEdges(Layer):
    """Reset the edges of a tensor to zero (i.e. with bands of zeros along the specified axes).
    The width of the zero-band is randomly drawn from a uniform distribution, whose range is given in boundaries.
    :param axes: axes along which to reset edges to zero. Can be an int (single axis), or a sequence.
    :param boundaries: numpy array of shape (len(axes), 4). Each row contains the two bounds of the uniform
    distributions from which we draw the width of the zero-bands on each side.
    Those bounds must be expressed in relative side (i.e. between 0 and 1).
    :return: a tensor of the same shape as the input, with bands of zeros along the pecified axes.
    example:
    tensor=tf.constant([[[[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                          [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]]])  # shape = [1,10,10,1]
    axes=1
    boundaries = np.array([[0.2, 0.45, 0.85, 0.9]])
    In this case, we reset the edges along the 2nd dimension (i.e. the 1st dimension after the batch dimension),
    the 1st zero-band will expand from the 1st row to a number drawn from [0.2*tensor.shape[1], 0.45*tensor.shape[1]],
    and the 2nd zero-band will expand from a row drawn from [0.85*tensor.shape[1], 0.9*tensor.shape[1]], to the end of
    the tensor. A possible output could be:
    array([[[[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]]])  # shape = [1,10,10,1]
    """

    def __init__(self, axes, boundaries, prob_mask=1, **kwargs):
        # axes along which zero-bands are applied (normalized to a list of ints)
        self.axes = utils.reformat_to_list(axes, dtype='int')
        # one row of 4 relative bounds per axis: [min1, max1, min2, max2] in [0, 1]
        self.boundaries = utils.reformat_to_n_channels_array(boundaries, n_dims=4, n_channels=len(self.axes))
        # probability of actually applying the mask in call() (1 = always)
        self.prob_mask = prob_mask
        self.inputshape = None
        super(MaskEdges, self).__init__(**kwargs)

    def get_config(self):
        """Return the layer configuration (for Keras serialization)."""
        config = super().get_config()
        config["axes"] = self.axes
        config["boundaries"] = self.boundaries
        config["prob_mask"] = self.prob_mask
        return config

    def build(self, input_shape):
        # static input shape is kept so call() can size the random bands per axis
        self.inputshape = input_shape
        self.built = True
        super(MaskEdges, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Return [masked_tensor, mask], where the mask zeroes random edge bands along self.axes.

        NOTE(review): the order of the tf.random.uniform draws below defines the random stream;
        do not reorder these ops.
        """
        # build mask
        mask = tf.ones_like(inputs)
        for i, axis in enumerate(self.axes):
            # select restricting indices
            axis_boundaries = self.boundaries[i, :]
            # idx1: width of the leading zero-band, drawn from [min1, max1] (fractions of the axis length)
            idx1 = tf.math.round(tf.random.uniform([1],
                                                 minval=axis_boundaries[0] * self.inputshape[axis],
                                                 maxval=axis_boundaries[1] * self.inputshape[axis]))
            # idx2: width of the central band of ones (its end is drawn from [min2, max2])
            idx2 = tf.math.round(tf.random.uniform([1],
                                                 minval=axis_boundaries[2] * self.inputshape[axis],
                                                 maxval=axis_boundaries[3] * self.inputshape[axis] - 1) - idx1)
            # idx3: width of the trailing zero-band (whatever remains of the axis)
            idx3 = self.inputshape[axis] - idx1 - idx2
            split_idx = tf.cast(tf.concat([idx1, idx2, idx3], axis=0), dtype='int32')
            # update mask: zeros on both edge bands, ones in the middle, accumulated across axes
            split_list = tf.split(inputs, split_idx, axis=axis)
            tmp_mask = tf.concat([tf.zeros_like(split_list[0]),
                                  tf.ones_like(split_list[1]),
                                  tf.zeros_like(split_list[2])], axis=axis)
            mask = mask * tmp_mask
        # mask second_channel: apply the mask with probability prob_mask, otherwise pass inputs through
        tensor = K.switch(tf.squeeze(K.greater(tf.random.uniform([1], 0, 1), 1 - self.prob_mask)),
                          inputs * mask,
                          inputs)
        return [tensor, mask]

    def compute_output_shape(self, input_shape):
        # two outputs (masked tensor and mask), both with the input's shape
        return [input_shape] * 2
| [
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.keras.backend.epsilon",
"keras.backend.sum",
"keras.backend.reshape",
"ext.neuron.layers.SpatialTransformer",
"tensorflow.split",
"tensorflow.math.floor",
"numpy.array",
"tensorflow.ones_like",
"tensorflow.math.exp",
"tens... | [((11760, 11819), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(self.crop_shape + [-1])'], {'dtype': '"""int32"""'}), "(self.crop_shape + [-1], dtype='int32')\n", (11780, 11819), True, 'import tensorflow as tf\n'), ((11835, 11880), 'tensorflow.slice', 'tf.slice', (['vol'], {'begin': 'crop_idx', 'size': 'crop_size'}), '(vol, begin=crop_idx, size=crop_size)\n', (11843, 11880), True, 'import tensorflow as tf\n'), ((18529, 18596), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_flip', '[inputs, rand_flip]'], {'dtype': 'tf.float32'}), '(self._single_flip, [inputs, rand_flip], dtype=tf.float32)\n', (18538, 18596), True, 'import tensorflow as tf\n'), ((18614, 18661), 'tensorflow.split', 'tf.split', (['inputs', 'self.list_n_channels'], {'axis': '(-1)'}), '(inputs, self.list_n_channels, axis=-1)\n', (18622, 18661), True, 'import tensorflow as tf\n'), ((26633, 26655), 'numpy.array', 'np.array', (['self.min_res'], {}), '(self.min_res)\n', (26641, 26655), True, 'import numpy as np\n'), ((27870, 27921), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.min_res'], {'dtype': '"""float32"""'}), "(self.min_res, dtype='float32')\n", (27890, 27921), True, 'import tensorflow as tf\n'), ((48115, 48150), 'tensorflow.cast', 'tf.cast', (['inputs[1]'], {'dtype': '"""float32"""'}), "(inputs[1], dtype='float32')\n", (48122, 48150), True, 'import tensorflow as tf\n'), ((48165, 48200), 'keras.backend.reshape', 'K.reshape', (['vol', '[-1, *self.inshape]'], {}), '(vol, [-1, *self.inshape])\n', (48174, 48200), True, 'import keras.backend as K\n'), ((49450, 49515), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_down_interpn', '[vol, down_loc]', 'tf.float32'], {}), '(self._single_down_interpn, [vol, down_loc], tf.float32)\n', (49459, 49515), True, 'import tensorflow as tf\n'), ((50112, 50173), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_up_interpn', '[vol, up_loc]', 'tf.float32'], {}), '(self._single_up_interpn, [vol, up_loc], tf.float32)\n', 
(50121, 50173), True, 'import tensorflow as tf\n'), ((51018, 51082), 'ext.neuron.utils.interpn', 'nrn_utils.interpn', (['inputs[0]', 'inputs[1]'], {'interp_method': '"""nearest"""'}), "(inputs[0], inputs[1], interp_method='nearest')\n", (51035, 51082), True, 'from ext.neuron import utils as nrn_utils\n'), ((51153, 51216), 'ext.neuron.utils.interpn', 'nrn_utils.interpn', (['inputs[0]', 'inputs[1]'], {'interp_method': '"""linear"""'}), "(inputs[0], inputs[1], interp_method='linear')\n", (51170, 51216), True, 'from ext.neuron import utils as nrn_utils\n'), ((58293, 58320), 'tensorflow.ones', 'tf.ones', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", (58300, 58320), True, 'import tensorflow as tf\n'), ((63163, 63186), 'keras.backend.mean', 'K.mean', (['(1 - last_tensor)'], {}), '(1 - last_tensor)\n', (63169, 63186), True, 'import keras.backend as K\n'), ((64542, 64601), 'tensorflow.expand_dims', 'tf.expand_dims', (['(1 - gt[..., 0] + self.background_weight)', '(-1)'], {}), '(1 - gt[..., 0] + self.background_weight, -1)\n', (64556, 64601), True, 'import tensorflow as tf\n'), ((65827, 65860), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.values'], {}), '(self.values)\n', (65847, 65860), True, 'import tensorflow as tf\n'), ((66001, 66046), 'tensorflow.cast', 'tf.cast', (['self.values_tens'], {'dtype': 'inputs.dtype'}), '(self.values_tens, dtype=inputs.dtype)\n', (66008, 66046), True, 'import tensorflow as tf\n'), ((70615, 70701), 'tensorflow.pad', 'tf.pad', (['inputs', 'self.pad_margin_tens'], {'mode': '"""CONSTANT"""', 'constant_values': 'self.value'}), "(inputs, self.pad_margin_tens, mode='CONSTANT', constant_values=self.\n value)\n", (70621, 70701), True, 'import tensorflow as tf\n'), ((73900, 73920), 'tensorflow.ones_like', 'tf.ones_like', (['inputs'], {}), '(inputs)\n', (73912, 73920), True, 'import tensorflow as tf\n'), ((7147, 7174), 'tensorflow.cast', 'tf.cast', (['v'], {'dtype': '"""float32"""'}), "(v, dtype='float32')\n", (7154, 
7174), True, 'import tensorflow as tf\n'), ((8341, 8390), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1, 1)'], {'maxval': 'self.nonlin_std'}), '((1, 1), maxval=self.nonlin_std)\n', (8358, 8390), True, 'import tensorflow as tf\n'), ((8419, 8466), 'tensorflow.random.normal', 'tf.random.normal', (['trans_shape'], {'stddev': 'trans_std'}), '(trans_shape, stddev=trans_std)\n', (8435, 8466), True, 'import tensorflow as tf\n'), ((9283, 9296), 'tensorflow.cast', 'tf.cast', (['v', 't'], {}), '(v, t)\n', (9290, 9296), True, 'import tensorflow as tf\n'), ((10651, 10676), 'numpy.array', 'np.array', (['self.crop_shape'], {}), '(self.crop_shape)\n', (10659, 10676), True, 'import numpy as np\n'), ((10980, 11037), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_slice', 'inputs'], {'dtype': 'inputs.dtype'}), '(self._single_slice, inputs, dtype=inputs.dtype)\n', (10989, 11037), True, 'import tensorflow as tf\n'), ((11314, 11369), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_slice', 'inputs'], {'dtype': 'tf.float32'}), '(self._single_slice, inputs, dtype=tf.float32)\n', (11323, 11369), True, 'import tensorflow as tf\n'), ((11391, 11438), 'tensorflow.split', 'tf.split', (['inputs', 'self.list_n_channels'], {'axis': '(-1)'}), '(inputs, self.list_n_channels, axis=-1)\n', (11399, 11438), True, 'import tensorflow as tf\n'), ((18679, 18692), 'tensorflow.cast', 'tf.cast', (['v', 't'], {}), '(v, t)\n', (18686, 18692), True, 'import tensorflow as tf\n'), ((18799, 18834), 'tensorflow.gather', 'tf.gather', (['self.swap_lut', 'inputs[0]'], {}), '(self.swap_lut, inputs[0])\n', (18808, 18834), True, 'import tensorflow as tf\n'), ((18943, 18996), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]', '(0)', 'self.n_dims'], {'dtype': '"""int32"""'}), "([1], 0, self.n_dims, dtype='int32')\n", (18960, 18996), True, 'import tensorflow as tf\n'), ((19243, 19279), 'keras.backend.reverse', 'K.reverse', (['inputs[0]'], {'axes': 'flip_axis'}), '(inputs[0], axes=flip_axis)\n', (19252, 
19279), True, 'import keras.backend as K\n'), ((21537, 21567), 'numpy.max', 'np.max', (['self.generation_labels'], {}), '(self.generation_labels)\n', (21543, 21567), True, 'import numpy as np\n'), ((26734, 26766), 'numpy.array', 'np.array', (['self.max_res_iso_input'], {}), '(self.max_res_iso_input)\n', (26742, 26766), True, 'import numpy as np\n'), ((27001, 27047), 'numpy.array_equal', 'np.array_equal', (['self.min_res', 'self.max_res_iso'], {}), '(self.min_res, self.max_res_iso)\n', (27015, 27047), True, 'import numpy as np\n'), ((27171, 27205), 'numpy.array', 'np.array', (['self.max_res_aniso_input'], {}), '(self.max_res_aniso_input)\n', (27179, 27205), True, 'import numpy as np\n'), ((27443, 27491), 'numpy.array_equal', 'np.array_equal', (['self.min_res', 'self.max_res_aniso'], {}), '(self.min_res, self.max_res_aniso)\n', (27457, 27491), True, 'import numpy as np\n'), ((28133, 28209), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(1, 1)', 'minval': '(0)', 'maxval': 'self.n_dims', 'dtype': '"""int32"""'}), "(shape=(1, 1), minval=0, maxval=self.n_dims, dtype='int32')\n", (28150, 28209), True, 'import tensorflow as tf\n'), ((34881, 34907), 'tensorflow.cast', 'tf.cast', (['inputs[1]', '"""bool"""'], {}), "(inputs[1], 'bool')\n", (34888, 34907), True, 'import tensorflow as tf\n'), ((40197, 40226), 'tensorflow.concat', 'tf.concat', (['gradients'], {'axis': '(-1)'}), '(gradients, axis=-1)\n', (40206, 40226), True, 'import tensorflow as tf\n'), ((42974, 43038), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_blur', '[image, kernels]'], {'dtype': 'tf.float32'}), '(self._single_blur, [image, kernels], dtype=tf.float32)\n', (42983, 43038), True, 'import tensorflow as tf\n'), ((43158, 43209), 'tensorflow.split', 'tf.split', (['inputs[0]', '([1] * self.n_channels)'], {'axis': '(-1)'}), '(inputs[0], [1] * self.n_channels, axis=-1)\n', (43166, 43209), True, 'import tensorflow as tf\n'), ((43497, 43527), 'tensorflow.concat', 'tf.concat', 
(['blurred_channel', '(-1)'], {}), '(blurred_channel, -1)\n', (43506, 43527), True, 'import tensorflow as tf\n'), ((43678, 43704), 'tensorflow.squeeze', 'tf.squeeze', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (43688, 43704), True, 'import tensorflow as tf\n'), ((49087, 49115), 'tensorflow.cast', 'tf.cast', (['down_loc', '"""float32"""'], {}), "(down_loc, 'float32')\n", (49094, 49115), True, 'import tensorflow as tf\n'), ((49402, 49434), 'tensorflow.cast', 'tf.cast', (['inshape_tens', '"""float32"""'], {}), "(inshape_tens, 'float32')\n", (49409, 49434), True, 'import tensorflow as tf\n'), ((50010, 50036), 'tensorflow.cast', 'tf.cast', (['up_loc', '"""float32"""'], {}), "(up_loc, 'float32')\n", (50017, 50036), True, 'import tensorflow as tf\n'), ((50388, 50409), 'tensorflow.math.floor', 'tf.math.floor', (['up_loc'], {}), '(up_loc)\n', (50401, 50409), True, 'import tensorflow as tf\n'), ((50429, 50449), 'tensorflow.math.ceil', 'tf.math.ceil', (['up_loc'], {}), '(up_loc)\n', (50441, 50449), True, 'import tensorflow as tf\n'), ((54858, 54881), 'tensorflow.math.exp', 'tf.math.exp', (['bias_field'], {}), '(bias_field)\n', (54869, 54881), True, 'import tensorflow as tf\n'), ((58033, 58060), 'numpy.array', 'np.array', (['input_shape[1:-1]'], {}), '(input_shape[1:-1])\n', (58041, 58060), True, 'import numpy as np\n'), ((59589, 59643), 'tensorflow.random.uniform', 'tf.random.uniform', (['sample_shape'], {'maxval': 'self.noise_std'}), '(sample_shape, maxval=self.noise_std)\n', (59606, 59643), True, 'import tensorflow as tf\n'), ((60130, 60186), 'keras.backend.clip', 'K.clip', (['inputs', 'self.clip_values[0]', 'self.clip_values[1]'], {}), '(inputs, self.clip_values[0], self.clip_values[1])\n', (60136, 60186), True, 'import keras.backend as K\n'), ((61294, 61324), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['inputs', 'm', 'M'], {}), '(inputs, m, M)\n', (61310, 61324), True, 'import tensorflow as tf\n'), ((63001, 63027), 'tensorflow.keras.backend.epsilon', 
'tf.keras.backend.epsilon', ([], {}), '()\n', (63025, 63027), True, 'import tensorflow as tf\n'), ((67635, 67665), 'tensorflow.cast', 'tf.cast', (['inputs'], {'dtype': '"""int32"""'}), "(inputs, dtype='int32')\n", (67642, 67665), True, 'import tensorflow as tf\n'), ((69529, 69569), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['pad'], {'dtype': '"""int32"""'}), "(pad, dtype='int32')\n", (69549, 69569), True, 'import tensorflow as tf\n'), ((74786, 74824), 'tensorflow.split', 'tf.split', (['inputs', 'split_idx'], {'axis': 'axis'}), '(inputs, split_idx, axis=axis)\n', (74794, 74824), True, 'import tensorflow as tf\n'), ((7221, 7240), 'tensorflow.shape', 'tf.shape', (['inputs[0]'], {}), '(inputs[0])\n', (7229, 7240), True, 'import tensorflow as tf\n'), ((8715, 8775), 'ext.neuron.layers.Resize', 'nrn_layers.Resize', ([], {'size': 'resize_shape', 'interp_method': '"""linear"""'}), "(size=resize_shape, interp_method='linear')\n", (8732, 8775), True, 'import ext.neuron.layers as nrn_layers\n'), ((8819, 8838), 'ext.neuron.layers.VecInt', 'nrn_layers.VecInt', ([], {}), '()\n', (8836, 8838), True, 'import ext.neuron.layers as nrn_layers\n'), ((8882, 8956), 'ext.neuron.layers.Resize', 'nrn_layers.Resize', ([], {'size': 'self.inshape[:self.n_dims]', 'interp_method': '"""linear"""'}), "(size=self.inshape[:self.n_dims], interp_method='linear')\n", (8899, 8956), True, 'import ext.neuron.layers as nrn_layers\n'), ((10605, 10647), 'numpy.array', 'np.array', (['inputshape[0][1:self.n_dims + 1]'], {}), '(inputshape[0][1:self.n_dims + 1])\n', (10613, 10647), True, 'import numpy as np\n'), ((11459, 11472), 'tensorflow.cast', 'tf.cast', (['v', 't'], {}), '(v, t)\n', (11466, 11472), True, 'import tensorflow as tf\n'), ((11605, 11632), 'numpy.array', 'np.array', (['self.crop_max_val'], {}), '(self.crop_max_val)\n', (11613, 11632), True, 'import numpy as np\n'), ((11701, 11729), 'tensorflow.zeros', 'tf.zeros', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", 
(11709, 11729), True, 'import tensorflow as tf\n'), ((17288, 17343), 'numpy.concatenate', 'np.concatenate', (['(rl_split[0], rl_split[2], rl_split[1])'], {}), '((rl_split[0], rl_split[2], rl_split[1]))\n', (17302, 17343), True, 'import numpy as np\n'), ((17459, 17504), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['swap_lut'], {'dtype': '"""int32"""'}), "(swap_lut, dtype='int32')\n", (17479, 17504), True, 'import tensorflow as tf\n'), ((17874, 17893), 'tensorflow.shape', 'tf.shape', (['inputs[0]'], {}), '(inputs[0])\n', (17882, 17893), True, 'import tensorflow as tf\n'), ((18455, 18476), 'tensorflow.cast', 'tf.cast', (['v', '"""float32"""'], {}), "(v, 'float32')\n", (18462, 18476), True, 'import tensorflow as tf\n'), ((22070, 22089), 'tensorflow.shape', 'tf.shape', (['inputs[0]'], {}), '(inputs[0])\n', (22078, 22089), True, 'import tensorflow as tf\n'), ((22488, 22528), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", (22508, 22528), True, 'import tensorflow as tf\n'), ((22580, 22625), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['tmp_indices', 'means', 'self.shape'], {}), '(tmp_indices, means, self.shape)\n', (22593, 22625), True, 'import tensorflow as tf\n'), ((22683, 22704), 'tensorflow.gather', 'tf.gather', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (22692, 22704), True, 'import tensorflow as tf\n'), ((22885, 22929), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['tmp_indices', 'stds', 'self.shape'], {}), '(tmp_indices, stds, self.shape)\n', (22898, 22929), True, 'import tensorflow as tf\n'), ((22986, 23007), 'tensorflow.gather', 'tf.gather', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (22995, 23007), True, 'import tensorflow as tf\n'), ((28257, 28294), 'tensorflow.zeros', 'tf.zeros', (['[self.n_dims]'], {'dtype': '"""bool"""'}), "([self.n_dims], dtype='bool')\n", (28265, 28294), True, 'import tensorflow as tf\n'), ((28348, 28390), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', 
(['[True]'], {'dtype': '"""bool"""'}), "([True], dtype='bool')\n", (28368, 28390), True, 'import tensorflow as tf\n'), ((28599, 28635), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.min_res_tens', '(0)'], {}), '(self.min_res_tens, 0)\n', (28613, 28635), True, 'import tensorflow as tf\n'), ((28912, 28941), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': '"""bool"""'}), "(shape, dtype='bool')\n", (28920, 28941), True, 'import tensorflow as tf\n'), ((28952, 28980), 'tensorflow.ones', 'tf.ones', (['batch'], {'dtype': '"""bool"""'}), "(batch, dtype='bool')\n", (28959, 28980), True, 'import tensorflow as tf\n'), ((29306, 29376), 'tensorflow.random.uniform', 'tf.random.uniform', (['shape'], {'minval': 'self.min_res', 'maxval': 'self.max_res_iso'}), '(shape, minval=self.min_res, maxval=self.max_res_iso)\n', (29323, 29376), True, 'import tensorflow as tf\n'), ((33200, 33220), 'numpy.array', 'np.array', (['self.sigma'], {}), '(self.sigma)\n', (33208, 33220), True, 'import numpy as np\n'), ((34380, 34400), 'numpy.array', 'np.array', (['self.sigma'], {}), '(self.sigma)\n', (34388, 34400), True, 'import numpy as np\n'), ((38151, 38167), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (38159, 38167), True, 'import tensorflow as tf\n'), ((39867, 39911), 'tensorflow.concat', 'tf.concat', (['[gradients[i], zeros]'], {'axis': '(i + 1)'}), '([gradients[i], zeros], axis=i + 1)\n', (39876, 39911), True, 'import tensorflow as tf\n'), ((42485, 42509), 'numpy.array', 'np.array', (['self.max_sigma'], {}), '(self.max_sigma)\n', (42493, 42509), True, 'import numpy as np\n'), ((42876, 42939), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_blur', '[image, kernel]'], {'dtype': 'tf.float32'}), '(self._single_blur, [image, kernel], dtype=tf.float32)\n', (42885, 42939), True, 'import tensorflow as tf\n'), ((43575, 43603), 'tensorflow.expand_dims', 'tf.expand_dims', (['inputs[0]', '(0)'], {}), '(inputs[0], 0)\n', (43589, 43603), True, 'import tensorflow as tf\n'), 
((47629, 47676), 'ext.neuron.utils.volshape_to_ndgrid', 'nrn_utils.volshape_to_ndgrid', (['down_tensor_shape'], {}), '(down_tensor_shape)\n', (47657, 47676), True, 'from ext.neuron import utils as nrn_utils\n'), ((47738, 47787), 'ext.neuron.utils.volshape_to_ndgrid', 'nrn_utils.volshape_to_ndgrid', (['self.resample_shape'], {}), '(self.resample_shape)\n', (47766, 47787), True, 'from ext.neuron import utils as nrn_utils\n'), ((48264, 48277), 'tensorflow.shape', 'tf.shape', (['vol'], {}), '(vol)\n', (48272, 48277), True, 'import tensorflow as tf\n'), ((48334, 48361), 'tensorflow.ones', 'tf.ones', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", (48341, 48361), True, 'import tensorflow as tf\n'), ((48485, 48517), 'tensorflow.expand_dims', 'tf.expand_dims', (['subsample_res', '(0)'], {}), '(subsample_res, 0)\n', (48499, 48517), True, 'import tensorflow as tf\n'), ((48754, 48793), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.inshape[:-1]'], {}), '(self.inshape[:-1])\n', (48774, 48793), True, 'import tensorflow as tf\n'), ((48845, 48901), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.resample_shape'], {'dtype': '"""int32"""'}), "(self.resample_shape, dtype='int32')\n", (48865, 48901), True, 'import tensorflow as tf\n'), ((49225, 49264), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.inshape[:-1]'], {}), '(self.inshape[:-1])\n', (49245, 49264), True, 'import tensorflow as tf\n'), ((49781, 49794), 'tensorflow.shape', 'tf.shape', (['vol'], {}), '(vol)\n', (49789, 49794), True, 'import tensorflow as tf\n'), ((50724, 50755), 'tensorflow.math.minimum', 'tf.math.minimum', (['f_dist', 'c_dist'], {}), '(f_dist, c_dist)\n', (50739, 50755), True, 'import tensorflow as tf\n'), ((54738, 54825), 'ext.neuron.layers.Resize', 'nrn_layers.Resize', ([], {'size': 'self.inshape[0][1:self.n_dims + 1]', 'interp_method': '"""linear"""'}), "(size=self.inshape[0][1:self.n_dims + 1], interp_method=\n 'linear')\n", (54755, 54825), 
True, 'import ext.neuron.layers as nrn_layers\n'), ((54903, 54934), 'tensorflow.math.multiply', 'tf.math.multiply', (['bias_field', 'v'], {}), '(bias_field, v)\n', (54919, 54934), True, 'import tensorflow as tf\n'), ((59071, 59087), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (59079, 59087), True, 'import tensorflow as tf\n'), ((59319, 59375), 'tensorflow.concat', 'tf.concat', (['[sample_shape, self.n_channels * self.one]', '(0)'], {}), '([sample_shape, self.n_channels * self.one], 0)\n', (59328, 59375), True, 'import tensorflow as tf\n'), ((59425, 59463), 'tensorflow.concat', 'tf.concat', (['[sample_shape, self.one]', '(0)'], {}), '([sample_shape, self.one], 0)\n', (59434, 59463), True, 'import tensorflow as tf\n'), ((62961, 62978), 'tensorflow.math.square', 'tf.math.square', (['x'], {}), '(x)\n', (62975, 62978), True, 'import tensorflow as tf\n'), ((62981, 62998), 'tensorflow.math.square', 'tf.math.square', (['y'], {}), '(y)\n', (62995, 62998), True, 'import tensorflow as tf\n'), ((64687, 64701), 'keras.backend.sum', 'K.sum', (['weights'], {}), '(weights)\n', (64692, 64701), True, 'import keras.backend as K\n'), ((66116, 66143), 'tensorflow.equal', 'tf.equal', (['inputs', 'values[i]'], {}), '(inputs, values[i])\n', (66124, 66143), True, 'import tensorflow as tf\n'), ((66145, 66166), 'tensorflow.zeros_like', 'tf.zeros_like', (['inputs'], {}), '(inputs)\n', (66158, 66166), True, 'import tensorflow as tf\n'), ((69978, 70034), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.pad_shape_tens'], {'dtype': '"""int32"""'}), "(self.pad_shape_tens, dtype='int32')\n", (69998, 70034), True, 'import tensorflow as tf\n'), ((70069, 70119), 'tensorflow.math.maximum', 'tf.math.maximum', (['tensor_shape', 'self.pad_shape_tens'], {}), '(tensor_shape, self.pad_shape_tens)\n', (70084, 70119), True, 'import tensorflow as tf\n'), ((70327, 70372), 'tensorflow.stack', 'tf.stack', (['[min_margins, max_margins]'], {'axis': '(-1)'}), '([min_margins, 
max_margins], axis=-1)\n', (70335, 70372), True, 'import tensorflow as tf\n'), ((74093, 74221), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]'], {'minval': '(axis_boundaries[0] * self.inputshape[axis])', 'maxval': '(axis_boundaries[1] * self.inputshape[axis])'}), '([1], minval=axis_boundaries[0] * self.inputshape[axis],\n maxval=axis_boundaries[1] * self.inputshape[axis])\n', (74110, 74221), True, 'import tensorflow as tf\n'), ((74680, 74717), 'tensorflow.concat', 'tf.concat', (['[idx1, idx2, idx3]'], {'axis': '(0)'}), '([idx1, idx2, idx3], axis=0)\n', (74689, 74717), True, 'import tensorflow as tf\n'), ((8253, 8306), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.small_shape'], {'dtype': '"""int32"""'}), "(self.small_shape, dtype='int32')\n", (8273, 8306), True, 'import tensorflow as tf\n'), ((9170, 9202), 'ext.neuron.layers.SpatialTransformer', 'nrn_layers.SpatialTransformer', (['m'], {}), '(m)\n', (9199, 9202), True, 'import ext.neuron.layers as nrn_layers\n'), ((11244, 11265), 'tensorflow.cast', 'tf.cast', (['v', '"""float32"""'], {}), "(v, 'float32')\n", (11251, 11265), True, 'import tensorflow as tf\n'), ((18222, 18290), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_swap', '[inputs[i], rand_flip]'], {'dtype': 'types[i]'}), '(self._single_swap, [inputs[i], rand_flip], dtype=types[i])\n', (18231, 18290), True, 'import tensorflow as tf\n'), ((19142, 19193), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.flip_axis'], {'dtype': '"""int32"""'}), "(self.flip_axis, dtype='int32')\n", (19162, 19193), True, 'import tensorflow as tf\n'), ((21736, 21751), 'numpy.max', 'np.max', (['indices'], {}), '(indices)\n', (21742, 21751), True, 'import numpy as np\n'), ((22165, 22208), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[1, 1]'], {'dtype': '"""int32"""'}), "([1, 1], dtype='int32')\n", (22185, 22208), True, 'import tensorflow as tf\n'), ((22248, 22281), 'tensorflow.cast', 'tf.cast', (['inputs[0]'], {'dtype': 
'"""int32"""'}), "(inputs[0], dtype='int32')\n", (22255, 22281), True, 'import tensorflow as tf\n'), ((23087, 23103), 'tensorflow.shape', 'tf.shape', (['labels'], {}), '(labels)\n', (23095, 23103), True, 'import tensorflow as tf\n'), ((28435, 28451), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (28443, 28451), True, 'import tensorflow as tf\n'), ((28508, 28548), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", (28528, 28548), True, 'import tensorflow as tf\n'), ((28688, 28738), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[self.n_dims]'], {'dtype': '"""int32"""'}), "([self.n_dims], dtype='int32')\n", (28708, 28738), True, 'import tensorflow as tf\n'), ((28781, 28802), 'tensorflow.range', 'tf.range', (['(0)', 'batch[0]'], {}), '(0, batch[0])\n', (28789, 28802), True, 'import tensorflow as tf\n'), ((28804, 28859), 'tensorflow.random.uniform', 'tf.random.uniform', (['batch', '(0)', 'self.n_dims'], {'dtype': '"""int32"""'}), "(batch, 0, self.n_dims, dtype='int32')\n", (28821, 28859), True, 'import tensorflow as tf\n'), ((29752, 29824), 'tensorflow.random.uniform', 'tf.random.uniform', (['shape'], {'minval': 'self.min_res', 'maxval': 'self.max_res_aniso'}), '(shape, minval=self.min_res, maxval=self.max_res_aniso)\n', (29769, 29824), True, 'import tensorflow as tf\n'), ((30188, 30258), 'tensorflow.random.uniform', 'tf.random.uniform', (['shape'], {'minval': 'self.min_res', 'maxval': 'self.max_res_iso'}), '(shape, minval=self.min_res, maxval=self.max_res_iso)\n', (30205, 30258), True, 'import tensorflow as tf\n'), ((30294, 30366), 'tensorflow.random.uniform', 'tf.random.uniform', (['shape'], {'minval': 'self.min_res', 'maxval': 'self.max_res_aniso'}), '(shape, minval=self.min_res, maxval=self.max_res_aniso)\n', (30311, 30366), True, 'import tensorflow as tf\n'), ((30926, 30953), 'tensorflow.shape', 'tf.shape', (['self.min_res_tens'], {}), '(self.min_res_tens)\n', (30934, 
30953), True, 'import tensorflow as tf\n'), ((36217, 36241), 'tensorflow.cast', 'tf.cast', (['mask', '"""float32"""'], {}), "(mask, 'float32')\n", (36224, 36241), True, 'import tensorflow as tf\n'), ((43328, 43354), 'tensorflow.expand_dims', 'tf.expand_dims', (['channel', '(0)'], {}), '(channel, 0)\n', (43342, 43354), True, 'import tensorflow as tf\n'), ((43447, 43474), 'tensorflow.squeeze', 'tf.squeeze', (['blurred'], {'axis': '(0)'}), '(blurred, axis=0)\n', (43457, 43474), True, 'import tensorflow as tf\n'), ((47467, 47494), 'numpy.array', 'np.array', (['self.inshape[:-1]'], {}), '(self.inshape[:-1])\n', (47475, 47494), True, 'import numpy as np\n'), ((49020, 49061), 'tensorflow.ones', 'tf.ones', (['[self.n_dims + 1]'], {'dtype': '"""int32"""'}), "([self.n_dims + 1], dtype='int32')\n", (49027, 49061), True, 'import tensorflow as tf\n'), ((49617, 49654), 'tensorflow.ones', 'tf.ones', (['[self.n_dims]'], {'dtype': '"""int32"""'}), "([self.n_dims], dtype='int32')\n", (49624, 49654), True, 'import tensorflow as tf\n'), ((49803, 49857), 'tensorflow.random.uniform', 'tf.random.uniform', (['sample_shape'], {'maxval': 'self.noise_std'}), '(sample_shape, maxval=self.noise_std)\n', (49820, 49857), True, 'import tensorflow as tf\n'), ((49940, 49981), 'tensorflow.ones', 'tf.ones', (['[self.n_dims + 1]'], {'dtype': '"""int32"""'}), "([self.n_dims + 1], dtype='int32')\n", (49947, 49981), True, 'import tensorflow as tf\n'), ((50867, 50887), 'tensorflow.math.square', 'tf.math.square', (['dist'], {}), '(dist)\n', (50881, 50887), True, 'import tensorflow as tf\n'), ((54249, 54268), 'tensorflow.shape', 'tf.shape', (['inputs[0]'], {}), '(inputs[0])\n', (54257, 54268), True, 'import tensorflow as tf\n'), ((54328, 54379), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.std_shape'], {'dtype': '"""int32"""'}), "(self.std_shape, dtype='int32')\n", (54348, 54379), True, 'import tensorflow as tf\n'), ((54432, 54490), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', 
(['self.small_bias_shape'], {'dtype': '"""int32"""'}), "(self.small_bias_shape, dtype='int32')\n", (54452, 54490), True, 'import tensorflow as tf\n'), ((54601, 54657), 'tensorflow.random.uniform', 'tf.random.uniform', (['std_shape'], {'maxval': 'self.bias_field_std'}), '(std_shape, maxval=self.bias_field_std)\n', (54618, 54657), True, 'import tensorflow as tf\n'), ((59206, 59243), 'tensorflow.ones', 'tf.ones', (['[self.n_dims]'], {'dtype': '"""int32"""'}), "([self.n_dims], dtype='int32')\n", (59213, 59243), True, 'import tensorflow as tf\n'), ((59724, 59740), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (59732, 59740), True, 'import tensorflow as tf\n'), ((59926, 59991), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['([1] * (self.n_dims + 1) + [self.n_channels])'], {}), '([1] * (self.n_dims + 1) + [self.n_channels])\n', (59946, 59991), True, 'import tensorflow as tf\n'), ((60424, 60513), 'tensorflow.concat', 'tf.concat', (['[batchsize, self.flatten_shape * self.one, self.n_channels * self.one]', '(0)'], {}), '([batchsize, self.flatten_shape * self.one, self.n_channels * self\n .one], 0)\n', (60433, 60513), True, 'import tensorflow as tf\n'), ((60559, 60615), 'tensorflow.concat', 'tf.concat', (['[batchsize, self.flatten_shape * self.one]', '(0)'], {}), '([batchsize, self.flatten_shape * self.one], 0)\n', (60568, 60615), True, 'import tensorflow as tf\n'), ((60654, 60679), 'tensorflow.reshape', 'tf.reshape', (['inputs', 'shape'], {}), '(inputs, shape)\n', (60664, 60679), True, 'import tensorflow as tf\n'), ((61370, 61381), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (61379, 61381), True, 'import keras.backend as K\n'), ((61510, 61563), 'tensorflow.random.normal', 'tf.random.normal', (['sample_shape'], {'stddev': 'self.gamma_std'}), '(sample_shape, stddev=self.gamma_std)\n', (61526, 61563), True, 'import tensorflow as tf\n'), ((62683, 62728), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['x'], {'axis': '(-1)', 
'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (62701, 62728), True, 'import tensorflow as tf\n'), ((62763, 62808), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['y'], {'axis': '(-1)', 'keepdims': '(True)'}), '(y, axis=-1, keepdims=True)\n', (62781, 62808), True, 'import tensorflow as tf\n'), ((64633, 64682), 'keras.backend.square', 'K.square', (['(pred - self.target_value * (2 * gt - 1))'], {}), '(pred - self.target_value * (2 * gt - 1))\n', (64641, 64682), True, 'import keras.backend as K\n'), ((69785, 69818), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_shape'], {}), '(input_shape)\n', (69805, 69818), True, 'import tensorflow as tf\n'), ((74354, 74486), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]'], {'minval': '(axis_boundaries[2] * self.inputshape[axis])', 'maxval': '(axis_boundaries[3] * self.inputshape[axis] - 1)'}), '([1], minval=axis_boundaries[2] * self.inputshape[axis],\n maxval=axis_boundaries[3] * self.inputshape[axis] - 1)\n', (74371, 74486), True, 'import tensorflow as tf\n'), ((74859, 74887), 'tensorflow.zeros_like', 'tf.zeros_like', (['split_list[0]'], {}), '(split_list[0])\n', (74872, 74887), True, 'import tensorflow as tf\n'), ((74923, 74950), 'tensorflow.ones_like', 'tf.ones_like', (['split_list[1]'], {}), '(split_list[1])\n', (74935, 74950), True, 'import tensorflow as tf\n'), ((74986, 75014), 'tensorflow.zeros_like', 'tf.zeros_like', (['split_list[2]'], {}), '(split_list[2])\n', (74999, 75014), True, 'import tensorflow as tf\n'), ((75141, 75169), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]', '(0)', '(1)'], {}), '([1], 0, 1)\n', (75158, 75169), True, 'import tensorflow as tf\n'), ((17990, 18015), 'tensorflow.ones', 'tf.ones', (['(1)'], {'dtype': '"""int32"""'}), "(1, dtype='int32')\n", (17997, 18015), True, 'import tensorflow as tf\n'), ((30023, 30078), 'tensorflow.where', 'tf.where', (['mask', 'new_resolution_aniso', 'self.min_res_tens'], {}), '(mask, new_resolution_aniso, 
self.min_res_tens)\n', (30031, 30078), True, 'import tensorflow as tf\n'), ((30566, 30621), 'tensorflow.where', 'tf.where', (['mask', 'new_resolution_aniso', 'self.min_res_tens'], {}), '(mask, new_resolution_aniso, self.min_res_tens)\n', (30574, 30621), True, 'import tensorflow as tf\n'), ((35561, 35585), 'tensorflow.cast', 'tf.cast', (['mask', '"""float32"""'], {}), "(mask, 'float32')\n", (35568, 35585), True, 'import tensorflow as tf\n'), ((36548, 36568), 'tensorflow.zeros_like', 'tf.zeros_like', (['image'], {}), '(image)\n', (36561, 36568), True, 'import tensorflow as tf\n'), ((40118, 40146), 'tensorflow.stack', 'tf.stack', (['gradients'], {'axis': '(-1)'}), '(gradients, axis=-1)\n', (40126, 40146), True, 'import tensorflow as tf\n'), ((48581, 48608), 'numpy.array', 'np.array', (['self.inshape[:-1]'], {}), '(self.inshape[:-1])\n', (48589, 48608), True, 'import numpy as np\n'), ((49712, 49739), 'tensorflow.ones', 'tf.ones', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", (49719, 49739), True, 'import tensorflow as tf\n'), ((29433, 29461), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]', '(0)', '(1)'], {}), '([1], 0, 1)\n', (29450, 29461), True, 'import tensorflow as tf\n'), ((35897, 35917), 'tensorflow.zeros_like', 'tf.zeros_like', (['image'], {}), '(image)\n', (35910, 35917), True, 'import tensorflow as tf\n'), ((36012, 36045), 'tensorflow.expand_dims', 'tf.expand_dims', (['image[..., n]', '(-1)'], {}), '(image[..., n], -1)\n', (36026, 36045), True, 'import tensorflow as tf\n'), ((36485, 36496), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (36494, 36496), True, 'import keras.backend as K\n'), ((39770, 39816), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['tmp_shape'], {'dtype': '"""int32"""'}), "(tmp_shape, dtype='int32')\n", (39790, 39816), True, 'import tensorflow as tf\n'), ((59831, 59860), 'tensorflow.split', 'tf.split', (['inputs', '[1, -1]', '(-1)'], {}), '(inputs, [1, -1], -1)\n', (59839, 59860), True, 
'import tensorflow as tf\n'), ((29881, 29909), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]', '(0)', '(1)'], {}), '([1], 0, 1)\n', (29898, 29909), True, 'import tensorflow as tf\n'), ((30423, 30451), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]', '(0)', '(1)'], {}), '([1], 0, 1)\n', (30440, 30451), True, 'import tensorflow as tf\n'), ((30679, 30707), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1]', '(0)', '(1)'], {}), '([1], 0, 1)\n', (30696, 30707), True, 'import tensorflow as tf\n'), ((35355, 35388), 'tensorflow.expand_dims', 'tf.expand_dims', (['image[..., n]', '(-1)'], {}), '(image[..., n], -1)\n', (35369, 35388), True, 'import tensorflow as tf\n'), ((35830, 35841), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (35839, 35841), True, 'import keras.backend as K\n'), ((36293, 36326), 'tensorflow.expand_dims', 'tf.expand_dims', (['maskb[..., n]', '(-1)'], {}), '(maskb[..., n], -1)\n', (36307, 36326), True, 'import tensorflow as tf\n'), ((38611, 38647), 'tensorflow.expand_dims', 'tf.expand_dims', (['gradient[..., n]', '(-1)'], {}), '(gradient[..., n], -1)\n', (38625, 38647), True, 'import tensorflow as tf\n'), ((35641, 35674), 'tensorflow.expand_dims', 'tf.expand_dims', (['maskb[..., n]', '(-1)'], {}), '(maskb[..., n], -1)\n', (35655, 35674), True, 'import tensorflow as tf\n')] |
import numpy as np
import openmdao.api as om
from ...utils.constants import INF_BOUND
from ...options import options as dymos_options
class BoundaryConstraintComp(om.ExplicitComponent):
    """
    Applies initial or final boundary constraints to variables in a phase.

    Each constrained variable is exposed as a pass-through input/output pair;
    the output is registered as an OpenMDAO constraint with the options
    recorded by ``_add_constraint``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Honor the dymos option that globally disables partial-derivative checks.
        self._no_check_partials = not dymos_options['include_check_partials']

    def initialize(self):
        """
        Declare component options and initialize constraint bookkeeping.
        """
        self.options.declare('loc', values=('initial', 'final'),
                             desc='the location in the phase of this boundary constraint '
                                  '(either \'initial\' or \'final\')')

        # Constraints pending I/O creation, as (name, kwargs) tuples.
        self._constraints = []

        # Metadata (input/output names and shape) for each constrained variable.
        self._vars = {}

    def configure_io(self):
        """
        Define the independent variables as output variables.

        I/O creation is delayed until configure so that we can determine the shape and units for
        the states.
        """
        for (name, kwargs) in self._constraints:
            input_name = '{0}_value_in:{1}'.format(self.options['loc'], name)
            output_name = '{0}_value:{1}'.format(self.options['loc'], name)
            self._vars[name] = {'input_name': input_name,
                                'output_name': output_name,
                                'shape': kwargs['shape']}

            input_kwargs = {k: kwargs[k] for k in ('units', 'shape', 'desc')}
            self.add_input(input_name, **input_kwargs)

            output_kwargs = {k: kwargs[k] for k in ('units', 'shape', 'desc')}
            self.add_output(output_name, **output_kwargs)

            constraint_kwargs = {k: kwargs.get(k, None)
                                 for k in ('lower', 'upper', 'equals', 'ref', 'ref0', 'adder',
                                           'scaler', 'indices', 'linear')}
            self.add_constraint(output_name, **constraint_kwargs)

        # Setup partials: each output is an identity map of its input, so the
        # Jacobian is a diagonal of ones.
        for name, options in self._vars.items():
            size = int(np.prod(options['shape']))

            rs = np.arange(size)
            cs = np.arange(size)

            self.declare_partials(of=options['output_name'],
                                  wrt=options['input_name'],
                                  val=np.ones(size),
                                  rows=rs,
                                  cols=cs)

    def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
        """
        Pass each boundary value straight through from input to output.
        """
        for name, options in self._vars.items():
            outputs[options['output_name']] = inputs[options['input_name']]

    def _add_constraint(self, name, units=None, res_units=None, desc='',
                        shape=None, indices=None, flat_indices=True,
                        lower=None, upper=None, equals=None,
                        scaler=None, adder=None, ref=1.0, ref0=0.0,
                        linear=False, res_ref=1.0, distributed=False):
        """
        Add an initial or final boundary constraint to this component.

        Parameters
        ----------
        name : str
            name of the variable in this component's namespace.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.
        res_units : str or None
            Units of the residual of the constrained output. Default is None.
        desc : str
            description of the variable.
        shape : int or tuple or list or None
            Shape of this variable. Default is None.
        indices : tuple, list, ndarray, or None
            The indices of the output variable to be boundary constrained. If provided, the
            resulting constraint is always a 1D vector with the number of elements provided in
            indices. Indices should be a 1D sequence of tuples, each providing an index into the
            source output if flat_indices is False, or integers if flat_indices is True.
        flat_indices : bool
            Whether or not indices is provided as 'flat' indices per OpenMDAO's flat_source_indices
            option when connecting variables.
        lower : float or list or tuple or ndarray or None
            lower bound(s) in user-defined units. A value of None means this output has no lower
            bound, unless upper is given, in which case lower defaults to -INF_BOUND.
            Default is None.
        upper : float or list or tuple or ndarray or None
            upper bound(s) in user-defined units. A value of None means this output has no upper
            bound, unless lower is given, in which case upper defaults to INF_BOUND.
            Default is None.
        equals : float or list or tuple or ndarray or None
            equality constraint value(s) in user-defined units. Default is None.
        scaler : float or None
            A multiplicative scaler on the constraint value for the optimizer.
        adder : float or None
            A parameter which is added to the value before scaler is applied to produce
            the value seen by the optimizer.
        ref : float or None
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 1. Default is 1.
        ref0 : float or None
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 0. Default is 0.
        linear : bool
            True if the *total* derivative of the constrained variable is linear, otherwise False.
        res_ref : float or None
            Scaling parameter for the residual of the constrained output. Default is 1.
        distributed : bool
            If True, this variable is distributed across multiple processes.
        """
        # If only one bound is supplied, make the opposite bound infinite so the
        # optimizer sees a one-sided constraint rather than an undefined one.
        lower = -INF_BOUND if upper is not None and lower is None else lower
        upper = INF_BOUND if lower is not None and upper is None else upper

        kwargs = {'units': units, 'res_units': res_units, 'desc': desc,
                  'shape': shape, 'indices': indices, 'flat_indices': flat_indices,
                  'lower': lower, 'upper': upper, 'equals': equals,
                  'scaler': scaler, 'adder': adder, 'ref': ref, 'ref0': ref0, 'linear': linear,
                  'res_ref': res_ref, 'distributed': distributed}

        self._constraints.append((name, kwargs))
| [
"numpy.prod",
"numpy.ones",
"numpy.arange"
] | [((1965, 1980), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (1974, 1980), True, 'import numpy as np\n'), ((1998, 2013), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2007, 2013), True, 'import numpy as np\n'), ((1920, 1945), 'numpy.prod', 'np.prod', (["options['shape']"], {}), "(options['shape'])\n", (1927, 1945), True, 'import numpy as np\n'), ((2175, 2188), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (2182, 2188), True, 'import numpy as np\n')] |
import pytest
from datetime import datetime
import pytz
import platform
from time import sleep
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
import pandas.util.testing as tm
pandas_gbq = pytest.importorskip('pandas_gbq')
# Fill these in to run the integration tests locally; under Travis the values
# come from the environment instead (see _get_project_id / _get_private_key_path).
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
# Use a distinct dataset per major Python version so parallel CI builds do
# not clobber each other's tables.
if compat.PY3:
    DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
    DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
# Interpreter version string, e.g. '3.6.1'.
VERSION = platform.python_version()
def _skip_if_no_project_id():
    """Skip the calling test when no BigQuery project id is configured."""
    project_id = _get_project_id()
    if project_id:
        return
    pytest.skip("Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
    """Skip the calling test when no service-account key file is configured."""
    key_path = _get_private_key_path()
    if key_path:
        return
    pytest.skip("Cannot run integration tests without a "
                "private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
    """Return the BigQuery project id from the environment (Travis) or module config."""
    if _in_travis_environment():
        return os.environ.get('GBQ_PROJECT_ID')
    return PROJECT_ID
def _get_private_key_path():
    """Return the path to the service-account key file, or the module default."""
    if not _in_travis_environment():
        return PRIVATE_KEY_JSON_PATH
    return os.path.join(os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
                        'travis_gbq.json')
def clean_gbq_environment(private_key=None):
    """Delete any leftover integration-test datasets (and their tables)."""
    dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
                                    private_key=private_key)

    for suffix in range(1, 10):
        dataset_id = DATASET_ID + str(suffix)
        if dataset_id not in dataset.datasets():
            continue
        table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
                                      private_key=private_key)
        # drop each numbered test table before removing the dataset itself
        for table_suffix in range(1, 20):
            table_id = TABLE_ID + str(table_suffix)
            if table_id in dataset.tables(dataset_id):
                table.delete(table_id)
        dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
    """Create a DataFrame exercising every BQ datatype except RECORD.

    Columns: bools, flts, ints, strs, and a timezone-aware times column.
    """
    bools = np.random.randint(2, size=(1, test_size)).astype(bool)
    flts = np.random.randn(1, test_size)
    ints = np.random.randint(1, 10, size=(1, test_size))
    strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
    tz = pytz.timezone('US/Arizona')
    times = [datetime.now(tz) for _ in range(test_size)]
    data = {'bools': bools[0],
            'flts': flts[0],
            'ints': ints[0],
            'strs': strs[0],
            'times': times[0]}
    return DataFrame(data, index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(tm.TestCase):
    # Round-trip integration test for DataFrame.to_gbq / pd.read_gbq using a
    # service-account key file.  Requires real BigQuery credentials; skipped
    # when no project id or key path is configured.

    @classmethod
    def setUpClass(cls):
        # - GLOBAL CLASS FIXTURES -
        # put here any instruction you want to execute only *ONCE* *BEFORE*
        # executing *ALL* tests described below.

        _skip_if_no_project_id()
        _skip_if_no_private_key_path()

        # start from a clean slate, then create the dataset the test writes to
        clean_gbq_environment(_get_private_key_path())
        pandas_gbq.gbq._Dataset(_get_project_id(),
                                private_key=_get_private_key_path()
                                ).create(DATASET_ID + "1")

    @classmethod
    def tearDownClass(cls):
        # - GLOBAL CLASS FIXTURES -
        # put here any instruction you want to execute only *ONCE* *AFTER*
        # executing all tests.

        clean_gbq_environment(_get_private_key_path())

    def test_roundtrip(self):
        destination_table = DESTINATION_TABLE + "1"

        test_size = 20001
        df = make_mixed_dataframe_v2(test_size)

        df.to_gbq(destination_table, _get_project_id(), chunksize=10000,
                  private_key=_get_private_key_path())

        # give BigQuery time to make the freshly-streamed rows queryable
        sleep(30)  # <- Curses Google!!!

        result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
                             .format(destination_table),
                             project_id=_get_project_id(),
                             private_key=_get_private_key_path())
        self.assertEqual(result['num_rows'][0], test_size)
| [
"pytz.timezone",
"pandas.compat.range",
"os.environ.get",
"time.sleep",
"numpy.random.randint",
"pytest.importorskip",
"pytest.skip",
"numpy.random.randn",
"platform.python_version"
] | [((262, 295), 'pytest.importorskip', 'pytest.importorskip', (['"""pandas_gbq"""'], {}), "('pandas_gbq')\n", (281, 295), False, 'import pytest\n'), ((594, 619), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (617, 619), False, 'import platform\n'), ((1634, 1646), 'pandas.compat.range', 'range', (['(1)', '(10)'], {}), '(1, 10)\n', (1639, 1646), False, 'from pandas.compat import range\n'), ((2261, 2290), 'numpy.random.randn', 'np.random.randn', (['(1)', 'test_size'], {}), '(1, test_size)\n', (2276, 2290), True, 'import numpy as np\n'), ((2302, 2347), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': '(1, test_size)'}), '(1, 10, size=(1, test_size))\n', (2319, 2347), True, 'import numpy as np\n'), ((690, 754), 'pytest.skip', 'pytest.skip', (['"""Cannot run integration tests without a project id"""'], {}), "('Cannot run integration tests without a project id')\n", (701, 754), False, 'import pytest\n'), ((850, 935), 'pytest.skip', 'pytest.skip', (['"""Cannot run integration tests without a private key json file path"""'], {}), "('Cannot run integration tests without a private key json file path'\n )\n", (861, 935), False, 'import pytest\n'), ((1151, 1183), 'os.environ.get', 'os.environ.get', (['"""GBQ_PROJECT_ID"""'], {}), "('GBQ_PROJECT_ID')\n", (1165, 1183), False, 'import os\n'), ((3896, 3905), 'time.sleep', 'sleep', (['(30)'], {}), '(30)\n', (3901, 3905), False, 'from time import sleep\n'), ((1908, 1920), 'pandas.compat.range', 'range', (['(1)', '(20)'], {}), '(1, 20)\n', (1913, 1920), False, 'from pandas.compat import range\n'), ((2195, 2236), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1, test_size)'}), '(2, size=(1, test_size))\n', (2212, 2236), True, 'import numpy as np\n'), ((2359, 2404), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': '(1, test_size)'}), '(1, 10, size=(1, test_size))\n', (2376, 2404), True, 'import numpy as np\n'), ((2443, 2470), 'pytz.timezone', 
'pytz.timezone', (['"""US/Arizona"""'], {}), "('US/Arizona')\n", (2456, 2470), False, 'import pytz\n'), ((2494, 2510), 'pandas.compat.range', 'range', (['test_size'], {}), '(test_size)\n', (2499, 2510), False, 'from pandas.compat import range\n'), ((2739, 2755), 'pandas.compat.range', 'range', (['test_size'], {}), '(test_size)\n', (2744, 2755), False, 'from pandas.compat import range\n'), ((1314, 1348), 'os.environ.get', 'os.environ.get', (['"""TRAVIS_BUILD_DIR"""'], {}), "('TRAVIS_BUILD_DIR')\n", (1328, 1348), False, 'import os\n')] |
import pickle
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_allclose)
from refnx.analysis import Parameter, Model
def line(x, params, *args, **kwds):
    """Evaluate a straight line y = c + m * x with (c, m) = params[:2]."""
    coeffs = np.array(params)
    intercept, slope = coeffs[0], coeffs[1]
    return intercept + x * slope
def line2(x, p):
    """Evaluate a straight line from a mapping with 'c' and 'm' parameters."""
    intercept = p['c'].value
    slope = p['m'].value
    return intercept + slope * x
def line3(x, params, x_err=None):
    # Stub fitfunc whose signature includes ``x_err``.  Used only to verify
    # that Model detects the keyword; it deliberately computes nothing.
    pass
class TestModel(object):
    """Tests for refnx.analysis.Model construction, evaluation and pickling."""

    def setup_method(self):
        pass

    def test_evaluation(self):
        intercept = Parameter(1.0, name='c')
        slope = Parameter(2.0, name='m')
        params = intercept | slope

        model = Model(params, fitfunc=line)
        xs = np.linspace(0, 100., 20)
        expected = 2. * xs + 1.

        # every calling convention must produce the same curve
        assert_equal(model.model(xs, params), expected)
        assert_equal(model(xs, params), expected)
        assert_equal(model.model(xs), expected)
        assert_equal(model(xs), expected)

        # the model object must survive a pickle round trip
        restored = pickle.loads(pickle.dumps(model))
        assert_equal(restored(xs), expected)

        # a lambda-style fitfunc retrieving parameters by name also works
        model = Model(params, fitfunc=line2)
        assert_equal(model(xs, params), expected)

        # parameter order should not matter when retrieval is by key
        params = slope | intercept
        model = Model(params, fitfunc=line2)
        assert_equal(model(xs, params), expected)

    def test_xerr(self):
        intercept = Parameter(1.0, name='c')
        slope = Parameter(2.0, name='m')
        params = intercept | slope

        # fitfuncs advertising an x_err keyword are flagged as such
        with_xerr = Model(params, fitfunc=line3)
        assert_(with_xerr._fitfunc_has_xerr is True)

        without_xerr = Model(params, fitfunc=line2)
        assert_(without_xerr._fitfunc_has_xerr is False)
| [
"refnx.analysis.Model",
"pickle.dumps",
"numpy.testing.assert_",
"numpy.array",
"numpy.linspace",
"refnx.analysis.Parameter",
"pickle.loads"
] | [((244, 260), 'numpy.array', 'np.array', (['params'], {}), '(params)\n', (252, 260), True, 'import numpy as np\n'), ((516, 540), 'refnx.analysis.Parameter', 'Parameter', (['(1.0)'], {'name': '"""c"""'}), "(1.0, name='c')\n", (525, 540), False, 'from refnx.analysis import Parameter, Model\n'), ((553, 577), 'refnx.analysis.Parameter', 'Parameter', (['(2.0)'], {'name': '"""m"""'}), "(2.0, name='m')\n", (562, 577), False, 'from refnx.analysis import Parameter, Model\n'), ((617, 639), 'refnx.analysis.Model', 'Model', (['p'], {'fitfunc': 'line'}), '(p, fitfunc=line)\n', (622, 639), False, 'from refnx.analysis import Parameter, Model\n'), ((652, 677), 'numpy.linspace', 'np.linspace', (['(0)', '(100.0)', '(20)'], {}), '(0, 100.0, 20)\n', (663, 677), True, 'import numpy as np\n'), ((995, 1018), 'pickle.dumps', 'pickle.dumps', (['fit_model'], {}), '(fit_model)\n', (1007, 1018), False, 'import pickle\n'), ((1035, 1052), 'pickle.loads', 'pickle.loads', (['pkl'], {}), '(pkl)\n', (1047, 1052), False, 'import pickle\n'), ((1153, 1176), 'refnx.analysis.Model', 'Model', (['p'], {'fitfunc': 'line2'}), '(p, fitfunc=line2)\n', (1158, 1176), False, 'from refnx.analysis import Parameter, Model\n'), ((1318, 1341), 'refnx.analysis.Model', 'Model', (['p'], {'fitfunc': 'line2'}), '(p, fitfunc=line2)\n', (1323, 1341), False, 'from refnx.analysis import Parameter, Model\n'), ((1421, 1445), 'refnx.analysis.Parameter', 'Parameter', (['(1.0)'], {'name': '"""c"""'}), "(1.0, name='c')\n", (1430, 1445), False, 'from refnx.analysis import Parameter, Model\n'), ((1458, 1482), 'refnx.analysis.Parameter', 'Parameter', (['(2.0)'], {'name': '"""m"""'}), "(2.0, name='m')\n", (1467, 1482), False, 'from refnx.analysis import Parameter, Model\n'), ((1522, 1545), 'refnx.analysis.Model', 'Model', (['p'], {'fitfunc': 'line3'}), '(p, fitfunc=line3)\n', (1527, 1545), False, 'from refnx.analysis import Parameter, Model\n'), ((1554, 1598), 'numpy.testing.assert_', 'assert_', (['(fit_model._fitfunc_has_xerr is 
True)'], {}), '(fit_model._fitfunc_has_xerr is True)\n', (1561, 1598), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_, assert_allclose\n'), ((1620, 1643), 'refnx.analysis.Model', 'Model', (['p'], {'fitfunc': 'line2'}), '(p, fitfunc=line2)\n', (1625, 1643), False, 'from refnx.analysis import Parameter, Model\n'), ((1652, 1697), 'numpy.testing.assert_', 'assert_', (['(fit_model._fitfunc_has_xerr is False)'], {}), '(fit_model._fitfunc_has_xerr is False)\n', (1659, 1697), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_, assert_allclose\n')] |
"""Colormaps."""
# --- import --------------------------------------------------------------------------------------
import collections
import numpy as np
from numpy import r_
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
import matplotlib.gridspec as grd
# --- define -------------------------------------------------------------------------------------
# Public API of this module.
# NOTE(review): the make_* factories and nm_to_rgb are left out of __all__ --
# confirm they are meant to stay module-internal.
__all__ = [
    "colormaps",
    "get_color_cycle",
    "grayify_cmap",
    "overline_colors",
    "plot_colormap_components",
]
# --- functions ----------------------------------------------------------------------------------
def make_cubehelix(name="WrightTools", gamma=0.5, s=0.25, r=-1, h=1.3, reverse=False, darkest=0.7):
    """Build a cubehelix-style colormap.

    Look `here`__ for more information.

    __ http://arxiv.org/abs/1108.5083

    Parameters
    ----------
    name : string (optional)
        Name of new cmap. Default is WrightTools.
    gamma : number (optional)
        Intensity factor. Default is 0.5.
    s : number (optional)
        Start color factor. Default is 0.25.
    r : number (optional)
        Number and direction of rotations. Default is -1.
    h : number (optional)
        Hue factor. Default is 1.3.
    reverse : boolean (optional)
        Toggle reversal of output colormap. By default (reverse=False) the
        colormap goes from light to dark.
    darkest : number (optional)
        Default is 0.7.

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    See Also
    --------
    plot_colormap_components
        Displays RGB components of colormaps.
    """
    # normalized per-channel luminance weights
    weight_r = .213 / .30
    weight_g = .715 / .99
    weight_b = .072 / .11

    def channel(p0, p1):
        def color(x):
            # luminance along the black-to-white diagonal
            lum = 1 - darkest * x ** gamma
            if reverse:
                lum = lum[::-1]
            # amplitude of the deviation from the diagonal in the plane of
            # constant perceived intensity (peaks at mid-luminance)
            amp = np.where(lum < 0.5, h * lum / 2., h * (1 - lum) / 2.)
            phi = 2 * np.pi * (s / 3 + r * x)
            return lum + amp * (p0 * np.cos(phi) + p1 * np.sin(phi))

        return color

    rgb_dict = {
        "red": channel(-0.14861 * weight_r, 1.78277 * weight_r),
        "green": channel(-0.29227 * weight_g, -0.90649 * weight_g),
        "blue": channel(1.97294 * weight_b, 0.0),
    }
    return matplotlib.colors.LinearSegmentedColormap(name, rgb_dict)
def make_colormap(seq, name="CustomMap", plot=False):
    """Generate a LinearSegmentedColormap.

    Parameters
    ----------
    seq : list of tuples
        A sequence of floats and RGB-tuples. The floats should be increasing
        and in the interval (0,1).
    name : string (optional)
        A name for the colormap.
    plot : boolean (optional)
        Use to generate a plot of the colormap (Default is False).

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    `Source`__

    __ http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
    """
    # pad with sentinel colors and 0/1 anchors so every float has neighbors
    padded = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {"red": [], "green": [], "blue": []}
    for idx, item in enumerate(padded):
        if not isinstance(item, float):
            continue
        below = padded[idx - 1]
        above = padded[idx + 1]
        for pos, channel in enumerate(("red", "green", "blue")):
            cdict[channel].append([item, below[pos], above[pos]])
    cmap = mplcolors.LinearSegmentedColormap(name, cdict)
    if plot:
        plot_colormap_components(cmap)
    return cmap
def nm_to_rgb(nm):
    """Convert a wavelength to corresponding RGB values [0.0-1.0].

    Parameters
    ----------
    nm : int or float
        The wavelength of light.

    Returns
    -------
    List of [R,G,B] values between 0 and 1

    `original code`__

    __ http://www.physics.sfasu.edu/astro/color/spectra.html
    """
    w = int(nm)

    # base chromaticity per spectral band ---------------------------------
    # NOTE(review): the 350-nm constants below differ from the classic
    # Bruton algorithm (which uses 380) -- confirm this is intentional.
    if 380 <= w < 440:
        R, G, B = -(w - 440.) / (440. - 350.), 0.0, 1.0
    elif 440 <= w < 490:
        R, G, B = 0.0, (w - 440.) / (490. - 440.), 1.0
    elif 490 <= w < 510:
        R, G, B = 0.0, 1.0, -(w - 510.) / (510. - 490.)
    elif 510 <= w < 580:
        R, G, B = (w - 510.) / (580. - 510.), 1.0, 0.0
    elif 580 <= w < 645:
        R, G, B = 1.0, -(w - 645.) / (645. - 580.), 0.0
    elif 645 <= w <= 780:
        R, G, B = 1.0, 0.0, 0.0
    else:
        R, G, B = 0.0, 0.0, 0.0

    # attenuate intensity near the edges of the visible range -------------
    if 380 <= w < 420:
        SSS = 0.3 + 0.7 * (w - 350) / (420 - 350)
    elif 420 <= w <= 700:
        SSS = 1.0
    elif 700 < w <= 780:
        SSS = 0.3 + 0.7 * (780 - w) / (780 - 700)
    else:
        SSS = 0.0
    SSS *= 255

    # quantize each channel to 8 bits, then renormalize by 256
    return [float(int(SSS * R) / 256.),
            float(int(SSS * G) / 256.),
            float(int(SSS * B) / 256.)]
def plot_colormap_components(cmap):
    """Plot the components of a given colormap.

    Shows the colormap itself (top), its R/G/B channels plus perceived
    luminance (middle), and a grayscale rendering (bottom).
    """
    from ._helpers import set_ax_labels  # recursive import protection
    plt.figure(figsize=[8, 4])
    gs = grd.GridSpec(3, 1, height_ratios=[1, 10, 1], hspace=0.05)
    # colorbar: render the map itself as a horizontal gradient
    ax = plt.subplot(gs[0])
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))
    ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.)
    ax.set_title(cmap.name, fontsize=20)
    ax.set_axis_off()
    # components: per-channel intensity curves
    ax = plt.subplot(gs[1])
    x = np.arange(cmap.N)
    colors = cmap(x)
    r = colors[:, 0]
    g = colors[:, 1]
    b = colors[:, 2]
    # perceived-luminance weights (same weighting as grayify_cmap)
    RGB_weight = [0.299, 0.587, 0.114]
    k = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
    r.clip(0, 1, out=r)
    g.clip(0, 1, out=g)
    b.clip(0, 1, out=b)
    xi = np.linspace(0, 1, x.size)
    plt.plot(xi, r, "r", linewidth=5, alpha=0.6)
    plt.plot(xi, g, "g", linewidth=5, alpha=0.6)
    plt.plot(xi, b, "b", linewidth=5, alpha=0.6)
    plt.plot(xi, k, "k", linewidth=5, alpha=0.6)
    ax.set_xlim(0, 1)
    ax.set_ylim(-0.1, 1.1)
    set_ax_labels(ax=ax, xlabel=None, xticks=False, ylabel="intensity")
    # grayified colorbar: how the map reads in pure luminance
    cmap = grayify_cmap(cmap)
    ax = plt.subplot(gs[2])
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))
    ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.)
    ax.set_axis_off()
def grayify_cmap(cmap):
    """Return a grayscale version of the colormap.

    `Source`__

    __ https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/
    """
    source = plt.cm.get_cmap(cmap)
    rgba = source(np.arange(source.N))
    # convert RGBA to perceived greyscale luminance
    # cf. http://alienryderflex.com/hsp.html
    weights = [0.299, 0.587, 0.114]
    luminance = np.sqrt(np.dot(rgba[:, :3] ** 2, weights))
    rgba[:, :3] = luminance[:, np.newaxis]
    return mplcolors.LinearSegmentedColormap.from_list(source.name + "_grayscale", rgba, source.N)
def get_color_cycle(n, cmap="rainbow", rotations=3):
    """Get a list of RGBA colors following a colormap.

    Useful for plotting lots of elements, keeping the color of each unique.

    Parameters
    ----------
    n : integer
        The number of colors to return.
    cmap : string (optional)
        The colormap to use in the cycle. Default is rainbow.
    rotations : integer (optional)
        The number of times to repeat the colormap over the cycle. Default is 3.

    Returns
    -------
    list
        List of RGBA lists.
    """
    chosen = colormaps[cmap]
    # colors per rotation, rounded up so n colors are always available
    per = np.floor_divide(n, rotations)
    if np.mod(n, rotations) != 0:
        per = per + 1
    vals = (list(np.linspace(0, 1, per)) * rotations)[:n]
    return chosen(vals)
# --- color maps ----------------------------------------------------------------------------------
# Default WrightTools cubehelix colormap instance.
cubehelix = make_cubehelix()
# Anchor colors for an experimental high-contrast map.
experimental = [
    "#FFFFFF",
    "#0000FF",
    "#0080FF",
    "#00FFFF",
    "#00FF00",
    "#FFFF00",
    "#FF8000",
    "#FF0000",
    "#881111",
]
greenscale = ["#000000", "#00FF00"] # black # green
greyscale = ["#FFFFFF", "#000000"] # white # black
invisible = ["#FFFFFF", "#FFFFFF"] # white # white
# isoluminant colorbar based on the research of Kindlmann et al.
# http://dx.doi.org/10.1109/VISUAL.2002.1183788
# shorthand: converts a hex string or RGB triple to an (r, g, b) tuple
c = mplcolors.ColorConverter().to_rgb
isoluminant1 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.847, 0.057, 0.057]),
        1 / 6.,
        c(r_[0.847, 0.057, 0.057]),
        c(r_[0.527, 0.527, 0.000]),
        2 / 6.,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.000, 0.592, 0.000]),
        3 / 6.,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.000, 0.559, 0.559]),
        4 / 6.,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.316, 0.316, 0.991]),
        5 / 6.,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.718, 0.000, 0.718]),
    ],
    # was "isoluminant`" (stray backtick); fixed to match isoluminant2/3
    name="isoluminant1",
)
# Same isoluminant anchors as isoluminant1, traversed in the reverse hue order.
isoluminant2 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.718, 0.000, 0.718]),
        1 / 6.,
        c(r_[0.718, 0.000, 0.718]),
        c(r_[0.316, 0.316, 0.991]),
        2 / 6.,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.000, 0.559, 0.559]),
        3 / 6.,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.000, 0.592, 0.000]),
        4 / 6.,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.527, 0.527, 0.000]),
        5 / 6.,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.847, 0.057, 0.057]),
    ],
    name="isoluminant2",
)
# Five-segment variant omitting the magenta endpoint.
isoluminant3 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.316, 0.316, 0.991]),
        1 / 5.,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.000, 0.559, 0.559]),
        2 / 5.,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.000, 0.592, 0.000]),
        3 / 5.,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.527, 0.527, 0.000]),
        4 / 5.,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.847, 0.057, 0.057]),
    ],
    name="isoluminant3",
)
# Diverging blue -> white -> red anchors for signed data.
signed = [
    "#0000FF", # blue
    "#002AFF",
    "#0055FF",
    "#007FFF",
    "#00AAFF",
    "#00D4FF",
    "#00FFFF",
    "#FFFFFF", # white
    "#FFFF00",
    "#FFD400",
    "#FFAA00",
    "#FF7F00",
    "#FF5500",
    "#FF2A00",
    "#FF0000",
] # red
# Coarser legacy version of the signed anchors.
signed_old = [
    "#0000FF", # blue
    "#00BBFF", # blue-aqua
    "#00FFFF", # aqua
    "#FFFFFF", # white
    "#FFFF00", # yellow
    "#FFBB00", # orange
    "#FF0000",
] # red
# Skyebar anchors: white lead-in followed by a dark-to-hot ramp.
skyebar = [
    "#FFFFFF", # white
    "#000000", # black
    "#0000FF", # blue
    "#00FFFF", # cyan
    "#64FF00", # light green
    "#FFFF00", # yellow
    "#FF8000", # orange
    "#FF0000", # red
    "#800000",
] # dark red
# Skyebar variant starting directly at black (no white lead-in).
skyebar_d = [
    "#000000", # black
    "#0000FF", # blue
    "#00FFFF", # cyan
    "#64FF00", # light green
    "#FFFF00", # yellow
    "#FF8000", # orange
    "#FF0000", # red
    "#800000",
] # dark red
# Skyebar variant with the black/white lead-in order inverted.
skyebar_i = [
    "#000000", # black
    "#FFFFFF", # white
    "#0000FF", # blue
    "#00FFFF", # cyan
    "#64FF00", # light green
    "#FFFF00", # yellow
    "#FF8000", # orange
    "#FF0000", # red
    "#800000",
] # dark red
# Wright-group rainbow anchors (white through red to dark red).
wright = ["#FFFFFF", "#0000FF", "#00FFFF", "#00FF00", "#FFFF00", "#FF0000", "#881111"]
# Registry of all named colormaps exposed by this module.
colormaps = collections.OrderedDict()
colormaps["coolwarm"] = plt.get_cmap("coolwarm")
colormaps["cubehelix"] = plt.get_cmap("cubehelix_r")
colormaps["default"] = cubehelix
colormaps["flag"] = plt.get_cmap("flag")
colormaps["greenscale"] = mplcolors.LinearSegmentedColormap.from_list("greenscale", greenscale)
colormaps["greyscale"] = mplcolors.LinearSegmentedColormap.from_list("greyscale", greyscale)
colormaps["invisible"] = mplcolors.LinearSegmentedColormap.from_list("invisible", invisible)
colormaps["isoluminant1"] = isoluminant1
colormaps["isoluminant2"] = isoluminant2
colormaps["isoluminant3"] = isoluminant3
colormaps["prism"] = plt.get_cmap("prism")
colormaps["rainbow"] = plt.get_cmap("rainbow")
colormaps["seismic"] = plt.get_cmap("seismic")
colormaps["signed"] = plt.get_cmap("bwr")
colormaps["signed_old"] = mplcolors.LinearSegmentedColormap.from_list("signed", signed_old)
colormaps["skyebar1"] = mplcolors.LinearSegmentedColormap.from_list("skyebar", skyebar)
colormaps["skyebar2"] = mplcolors.LinearSegmentedColormap.from_list("skyebar dark", skyebar_d)
colormaps["skyebar3"] = mplcolors.LinearSegmentedColormap.from_list("skyebar inverted", skyebar_i)
colormaps["wright"] = mplcolors.LinearSegmentedColormap.from_list("wright", wright)
# enforce grey as 'bad' value for colormaps
# NOTE(review): plt.get_cmap may return instances shared with matplotlib's
# global registry, so these mutations could leak beyond this module -- confirm.
for cmap in colormaps.values():
    cmap.set_bad([0.75] * 3, 1)
# enforce under and over for default colormap
colormaps["default"].set_under([0.50] * 3, 1)
colormaps["default"].set_over("m")
# enforce under and over for signed colormap
colormaps["signed"].set_under("c")
colormaps["signed"].set_over("m")
# a nice set of line colors
overline_colors = ["#CCFF00", "#FE4EDA", "#FF6600", "#00FFBF", "#00B7EB"]
| [
"collections.OrderedDict",
"matplotlib.colors.ColorConverter",
"numpy.floor_divide",
"numpy.sin",
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.mod",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"nump... | [((11685, 11710), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11708, 11710), False, 'import collections\n'), ((11735, 11759), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (11747, 11759), True, 'import matplotlib.pyplot as plt\n'), ((11785, 11812), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""cubehelix_r"""'], {}), "('cubehelix_r')\n", (11797, 11812), True, 'import matplotlib.pyplot as plt\n'), ((11866, 11886), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""flag"""'], {}), "('flag')\n", (11878, 11886), True, 'import matplotlib.pyplot as plt\n'), ((11913, 11982), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""greenscale"""', 'greenscale'], {}), "('greenscale', greenscale)\n", (11956, 11982), True, 'import matplotlib.colors as mplcolors\n'), ((12008, 12075), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""greyscale"""', 'greyscale'], {}), "('greyscale', greyscale)\n", (12051, 12075), True, 'import matplotlib.colors as mplcolors\n'), ((12101, 12168), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""invisible"""', 'invisible'], {}), "('invisible', invisible)\n", (12144, 12168), True, 'import matplotlib.colors as mplcolors\n'), ((12313, 12334), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (12325, 12334), True, 'import matplotlib.pyplot as plt\n'), ((12358, 12381), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (12370, 12381), True, 'import matplotlib.pyplot as plt\n'), ((12405, 12428), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (12417, 12428), True, 'import matplotlib.pyplot as plt\n'), ((12451, 12470), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""bwr"""'], {}), 
"('bwr')\n", (12463, 12470), True, 'import matplotlib.pyplot as plt\n'), ((12497, 12562), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""signed"""', 'signed_old'], {}), "('signed', signed_old)\n", (12540, 12562), True, 'import matplotlib.colors as mplcolors\n'), ((12587, 12650), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""skyebar"""', 'skyebar'], {}), "('skyebar', skyebar)\n", (12630, 12650), True, 'import matplotlib.colors as mplcolors\n'), ((12675, 12745), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""skyebar dark"""', 'skyebar_d'], {}), "('skyebar dark', skyebar_d)\n", (12718, 12745), True, 'import matplotlib.colors as mplcolors\n'), ((12770, 12844), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""skyebar inverted"""', 'skyebar_i'], {}), "('skyebar inverted', skyebar_i)\n", (12813, 12844), True, 'import matplotlib.colors as mplcolors\n'), ((12867, 12928), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (['"""wright"""', 'wright'], {}), "('wright', wright)\n", (12910, 12928), True, 'import matplotlib.colors as mplcolors\n'), ((2555, 2612), 'matplotlib.colors.LinearSegmentedColormap', 'matplotlib.colors.LinearSegmentedColormap', (['name', 'rgb_dict'], {}), '(name, rgb_dict)\n', (2596, 2612), False, 'import matplotlib\n'), ((3634, 3680), 'matplotlib.colors.LinearSegmentedColormap', 'mplcolors.LinearSegmentedColormap', (['name', 'cdict'], {}), '(name, cdict)\n', (3667, 3680), True, 'import matplotlib.colors as mplcolors\n'), ((5464, 5490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 4]'}), '(figsize=[8, 4])\n', (5474, 5490), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5557), 'matplotlib.gridspec.GridSpec', 'grd.GridSpec', (['(3)', '(1)'], 
{'height_ratios': '[1, 10, 1]', 'hspace': '(0.05)'}), '(3, 1, height_ratios=[1, 10, 1], hspace=0.05)\n', (5512, 5557), True, 'import matplotlib.gridspec as grd\n'), ((5582, 5600), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (5593, 5600), True, 'import matplotlib.pyplot as plt\n'), ((5616, 5638), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(256)'], {}), '(0, 1, 256)\n', (5627, 5638), True, 'import numpy as np\n'), ((5654, 5685), 'numpy.vstack', 'np.vstack', (['(gradient, gradient)'], {}), '((gradient, gradient))\n', (5663, 5685), True, 'import numpy as np\n'), ((5843, 5861), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (5854, 5861), True, 'import matplotlib.pyplot as plt\n'), ((5870, 5887), 'numpy.arange', 'np.arange', (['cmap.N'], {}), '(cmap.N)\n', (5879, 5887), True, 'import numpy as np\n'), ((6148, 6173), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'x.size'], {}), '(0, 1, x.size)\n', (6159, 6173), True, 'import numpy as np\n'), ((6178, 6222), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'r', '"""r"""'], {'linewidth': '(5)', 'alpha': '(0.6)'}), "(xi, r, 'r', linewidth=5, alpha=0.6)\n", (6186, 6222), True, 'import matplotlib.pyplot as plt\n'), ((6227, 6271), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'g', '"""g"""'], {'linewidth': '(5)', 'alpha': '(0.6)'}), "(xi, g, 'g', linewidth=5, alpha=0.6)\n", (6235, 6271), True, 'import matplotlib.pyplot as plt\n'), ((6276, 6320), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'b', '"""b"""'], {'linewidth': '(5)', 'alpha': '(0.6)'}), "(xi, b, 'b', linewidth=5, alpha=0.6)\n", (6284, 6320), True, 'import matplotlib.pyplot as plt\n'), ((6325, 6369), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'k', '"""k"""'], {'linewidth': '(5)', 'alpha': '(0.6)'}), "(xi, k, 'k', linewidth=5, alpha=0.6)\n", (6333, 6369), True, 'import matplotlib.pyplot as plt\n'), ((6555, 6573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (6566, 6573), True, 
'import matplotlib.pyplot as plt\n'), ((6589, 6611), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(256)'], {}), '(0, 1, 256)\n', (6600, 6611), True, 'import numpy as np\n'), ((6627, 6658), 'numpy.vstack', 'np.vstack', (['(gradient, gradient)'], {}), '((gradient, gradient))\n', (6636, 6658), True, 'import numpy as np\n'), ((6937, 6958), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (6952, 6958), True, 'import matplotlib.pyplot as plt\n'), ((7252, 7341), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mplcolors.LinearSegmentedColormap.from_list', (["(cmap.name + '_grayscale')", 'colors', 'cmap.N'], {}), "(cmap.name + '_grayscale',\n colors, cmap.N)\n", (7295, 7341), True, 'import matplotlib.colors as mplcolors\n'), ((8745, 8771), 'matplotlib.colors.ColorConverter', 'mplcolors.ColorConverter', ([], {}), '()\n', (8769, 8771), True, 'import matplotlib.colors as mplcolors\n'), ((6027, 6065), 'numpy.dot', 'np.dot', (['(colors[:, :3] ** 2)', 'RGB_weight'], {}), '(colors[:, :3] ** 2, RGB_weight)\n', (6033, 6065), True, 'import numpy as np\n'), ((6977, 6994), 'numpy.arange', 'np.arange', (['cmap.N'], {}), '(cmap.N)\n', (6986, 6994), True, 'import numpy as np\n'), ((7156, 7194), 'numpy.dot', 'np.dot', (['(colors[:, :3] ** 2)', 'RGB_weight'], {}), '(colors[:, :3] ** 2, RGB_weight)\n', (7162, 7194), True, 'import numpy as np\n'), ((7923, 7943), 'numpy.mod', 'np.mod', (['n', 'rotations'], {}), '(n, rotations)\n', (7929, 7943), True, 'import numpy as np\n'), ((7964, 7993), 'numpy.floor_divide', 'np.floor_divide', (['n', 'rotations'], {}), '(n, rotations)\n', (7979, 7993), True, 'import numpy as np\n'), ((8068, 8090), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'per'], {}), '(0, 1, per)\n', (8079, 8090), True, 'import numpy as np\n'), ((8018, 8047), 'numpy.floor_divide', 'np.floor_divide', (['n', 'rotations'], {}), '(n, rotations)\n', (8033, 8047), True, 'import numpy as np\n'), ((2257, 2268), 'numpy.cos', 'np.cos', (['phi'], 
{}), '(phi)\n', (2263, 2268), True, 'import numpy as np\n'), ((2276, 2287), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2282, 2287), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import tensorflow as tf
from tflearn.layers.conv import global_avg_pool
import argparse
class Model():
"""docstring for ClassName"""
    def __init__(self, arg):
        """Store run configuration and create graph-wide flags/counters.

        arg: parsed argparse namespace; must provide BATCH_SIZE, IMG_W, IMG_H.
        """
        self.arg = arg
        # Constant boolean tensors used to select batch-norm train/inference behavior.
        self.trainingmode = tf.constant(True,dtype=tf.bool)
        self.testingmode = tf.constant(False,dtype=tf.bool)
        # Non-trainable optimizer step counter.
        self.global_step = tf.Variable(0, trainable=False)
        self.BATCH_SIZE=arg.BATCH_SIZE
        self.IMG_W=arg.IMG_W
        self.IMG_H=arg.IMG_H
        # Populated by Squeeze_excitation_layer with the first sample's excitation
        # vector on every call (for inspection/visualization).
        self.se_block=[]
def basic_conv_block(self,x,f_num,kernel_size,strides_size,padding_mode,is_training):
x = tf.layers.conv2d(x,f_num,(kernel_size,kernel_size),strides=(strides_size,strides_size),padding=padding_mode,use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
x = tf.layers.batch_normalization(x,training = is_training)
x = tf.nn.relu(x)
return x
def basic_deconv_block(self,x,f_num,kernel_size,strides_size,padding_mode,is_training):
x = tf.layers.conv2d_transpose(x,f_num,(kernel_size,kernel_size),strides=(strides_size,strides_size),padding=padding_mode,use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
x = tf.layers.batch_normalization(x,training = is_training)
x = tf.nn.relu(x)
return x
def conv_k3_s2(self,x,filters_num):
with tf.variable_scope('conv_k3_s2',reuse=tf.AUTO_REUSE):
x = tf.layers.conv2d(inputs=x,filters=filters_num,kernel_size=(3,3),strides=(2,2),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def conv_k1_s1(self,x,filters_num):
x = tf.layers.conv2d(inputs=x,filters=filters_num,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def conv_k3_s1(self,x,filters_num):
x = tf.layers.conv2d(inputs=x,filters=filters_num,kernel_size=(3,3),strides=(1,1),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def conv_k3_s1_ns(self,x,filters_num):
x = tf.layers.conv2d(inputs=x,filters=filters_num,kernel_size=(3,3),strides=(1,1),padding='valid',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def deconv_k3_s2(self,x,filters_num):
with tf.variable_scope('deconv_k3_s2',reuse=tf.AUTO_REUSE):
x = tf.layers.conv2d_transpose(inputs=x,filters=filters_num,kernel_size=(3,3),strides=(2,2),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def deconv_k3_s2_s(self,x,filters_num):
x = tf.layers.conv2d_transpose(inputs=x,filters=filters_num,kernel_size=(3,3),strides=(2,2),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def deconv_k1_s1(self,x,filters_num):
x = tf.layers.conv2d_transpose(inputs=x,filters=filters_num,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def deconv_k3_s1(self,x,filters_num):
x = tf.layers.conv2d_transpose(inputs=x,filters=filters_num,kernel_size=(3,3),strides=(1,1),padding='same',use_bias=True,kernel_initializer=tf.truncated_normal_initializer() ,bias_initializer=tf.truncated_normal_initializer(),kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1),bias_regularizer=tf.contrib.layers.l2_regularizer(0.1))
return x
def conv_res(self,x,f_num,i,is_training):
channel_size=x.shape[3]
if channel_size!=f_num:
x = self.conv_k3_s2(x,f_num)
with tf.variable_scope('conv'+str(2*i),reuse=tf.AUTO_REUSE):
x1 = self.conv_k3_s1(x,f_num)
x1 = tf.layers.batch_normalization(x1,training = is_training)
x1 = tf.nn.relu(x1)
#print('conv'+str(2*i),x1.shape)
with tf.variable_scope('conv'+str(2*i+1),reuse=tf.AUTO_REUSE):
x2 = self.conv_k3_s1(x1,f_num)
x2=x2+x
x2 = tf.layers.batch_normalization(x2,training = is_training)
x2 = tf.nn.relu(x2)
#print('conv'+str(2*i+1),x2.shape)
return x2
def Relu(self,x):
return tf.nn.relu(x)
def Sigmoid(self,x) :
return tf.nn.sigmoid(x)
def deconv_res(self,x,f_num,i,is_training):
channel_size=x.shape[3]
if channel_size!=f_num:
x = self.deconv_k3_s2(x,f_num)
with tf.variable_scope('deconv'+str(2*i),reuse=tf.AUTO_REUSE):
x1 = self.deconv_k3_s1(x,f_num)
x1 = tf.layers.batch_normalization(x1,training = is_training)
x1 = tf.nn.relu(x1)
#print('deconv'+str(2*i),x1.shape)
with tf.variable_scope('deconv'+str(2*i+1),reuse=tf.AUTO_REUSE):
x2 = self.deconv_k3_s1(x1,f_num)
x2=x2+x
x2 = tf.layers.batch_normalization(x2,training = is_training)
x2 = tf.nn.relu(x2)
#print('deconv'+str(2*i+1),x2.shape)
return x2
def Global_Average_Pooling(self,x):
return global_avg_pool(x, name='Global_avg_pooling')
def Fully_connected(self,x, units, layer_name='fully_connected') :
with tf.name_scope(layer_name) :
return tf.layers.dense(inputs=x, use_bias=True, units=units)
    def Squeeze_excitation_layer(self,input_x, out_dim, ratio, layer_name):
        """Squeeze-and-Excitation gate: global pool -> bottleneck FC -> ReLU -> FC -> sigmoid,
        then rescale input_x channel-wise by the resulting excitation vector.

        NOTE(review): the `ratio` parameter is immediately overwritten with 8 below,
        so the value callers pass (typically 2) is ignored — confirm which reduction
        ratio was intended before relying on the argument.
        NOTE(review): `out_dim / ratio` is a float under Python 3 while `units`
        conventionally takes an int — verify against the TF version in use.
        Side effect: appends the first sample's excitation vector to self.se_block.
        """
        with tf.name_scope(layer_name) :
            ratio=8
            squeeze = self.Global_Average_Pooling(input_x)
            excitation = self.Fully_connected(squeeze, units=out_dim / ratio, layer_name=layer_name+'_fully_connected1')
            excitation = self.Relu(excitation)
            excitation = self.Fully_connected(excitation, units=out_dim, layer_name=layer_name+'_fully_connected2')
            excitation = self.Sigmoid(excitation)
            # Reshape to (N, 1, 1, C) so the multiply broadcasts over H and W.
            excitation = tf.reshape(excitation, [-1,1,1,out_dim])
            scale = input_x * excitation
            self.se_block.append(excitation[0,0,0,:])
            return scale
    def spp_layer(self,input_, levels=4, name = 'SPP_layer',pool_type = 'max_pool'):
        """Spatial-pyramid pooling: max-pool `input_` at `levels` grid sizes and
        concatenate the flattened results along axis 1.

        NOTE(review): `ksize` here is already a full 4-element list, but it is
        passed to self.max_pool as the scalar-style `k` argument — verify the
        expected argument form of max_pool/max_unpool.
        NOTE(review): `strides` and `unpool1` are computed but never used, and
        the first tf.reshape of `pool` is repeated for l == 1. `shape[0]` must
        be a static batch size (not None) for the reshapes to work.
        """
        shape = input_.get_shape().as_list()
        with tf.variable_scope(name):
            for l in range(levels):
                l = l + 1
                # Window/stride chosen so each level covers the map in ~l x l cells.
                ksize = [1, np.ceil(shape[1]/ l + 1).astype(np.int32), np.ceil(shape[2] / l + 1).astype(np.int32), 1]
                strides = [1, np.floor(shape[1] / l + 1).astype(np.int32), np.floor(shape[2] / l + 1).astype(np.int32), 1]
                if pool_type == 'max_pool':
                    pool, maxp1_argmax, maxp1_argmax_mask = self.max_pool(input_, ksize)
                    unpool1=self.max_unpool(input_, maxp1_argmax, maxp1_argmax_mask, ksize)
                    pool = tf.reshape(pool,(shape[0],-1),)
                    if l == 1:
                        x_flatten = tf.reshape(pool,(shape[0],-1))
                    else:
                        x_flatten = tf.concat((x_flatten,pool),axis=1)
                print("Pool Level {:}: shape {:}".format(l, x_flatten.get_shape().as_list()))
            return x_flatten
def max_pool(self,inp, k):
return tf.nn.max_pool_with_argmax_and_mask(inp, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding="SAME")
def max_unpool(self,inp, argmax, argmax_mask, k):
return tf.nn.max_unpool(inp, argmax, argmax_mask, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding="SAME")
    def add_layer(self,name, l,is_training):
        """Dense-block layer: conv branch on `l`, then concat the branch with `l`
        along channels (doubling channel count).

        NOTE(review): the second block re-reads `l` (not the res1 output), so the
        entire 'res1' computation below is discarded — confirm whether res2 was
        meant to consume `c` from res1.
        """
        shape = l.get_shape().as_list()
        in_channel = shape[3]
        with tf.variable_scope(name+"res1",reuse=tf.AUTO_REUSE):
            c = self.conv_k3_s1(l,in_channel)
            c = tf.nn.relu(c)
            c = tf.layers.batch_normalization(c,training = is_training)
            #c = tf.layers.dropout(c, rate=0.8, training=is_training)
        with tf.variable_scope(name+"res2",reuse=tf.AUTO_REUSE):
            # This overwrites the res1 output `c` (see NOTE above).
            c = self.conv_k3_s1(l,in_channel)
            #c=c+l
            c = tf.nn.relu(c)
            c = tf.layers.batch_normalization(c,training = is_training)
            #c = tf.layers.dropout(c, rate=0.8, training=is_training)
        l = tf.concat([c, l], 3)
        return l
    def rgb_dense_block(self,rgb_raw,basic_channel,is_training):
        """RGB feature extractor: five stacked dense layers, each followed by a
        1x1 channel reduction to 64, an (intermittent) residual add, and a
        squeeze-excitation gate.

        Returns a list of the five intermediate 64-channel feature maps.
        NOTE(review): stage 2 has its residual add commented out, unlike the
        other stages — confirm intentional. rgb_SE is created but never filled.
        """
        rgb_feature=[]
        rgb_SE=[]
        with tf.variable_scope('basic_channel'):
            # Project the raw input to `basic_channel` channels first.
            rgb_raw = self.conv_k3_s1(rgb_raw,basic_channel)
            rgb_raw = tf.nn.relu(rgb_raw)
            rgb_raw = tf.layers.batch_normalization(rgb_raw,training = is_training)
        with tf.variable_scope('rgb_feature'):
            d1 = self.add_layer('rgb_dense_layer.{}'.format(1), rgb_raw,is_training)
            with tf.variable_scope('reduce1'):
                d1 = self.conv_k1_s1(d1,64)
                d1=d1+rgb_raw
                d1=self.Squeeze_excitation_layer(d1, 64, 2, "rgb1")
                print('rgb_f1')
                print(d1.shape)
            d2 = self.add_layer('rgb_dense_layer.{}'.format(2), d1,is_training)
            #d2 = tf.layers.dropout(d2, rate=0.2, training=is_training)
            with tf.variable_scope('reduce2'):
                d2 = self.conv_k1_s1(d2,64)
                #d2=d2+d1
                d2=self.Squeeze_excitation_layer(d2, 64, 2, "rgb2")
                print('rgb_f2')
                print(d2.shape)
            d3 = self.add_layer('rgb_dense_layer.{}'.format(3), d2,is_training)
            #d3 = tf.layers.dropout(d3, rate=0.2, training=is_training)
            with tf.variable_scope('reduce3'):
                d3 = self.conv_k1_s1(d3,64)
                d3=d3+d2
                d3=self.Squeeze_excitation_layer(d3, 64, 2, "rgb3")
                print('rgb_f3')
                print(d3.shape)
            d4 = self.add_layer('rgb_dense_layer.{}'.format(4), d3,is_training)
            #d4 = tf.layers.dropout(d4, rate=0.2, training=is_training)
            with tf.variable_scope('reduce4'):
                d4 = self.conv_k1_s1(d4,64)
                d4=d4+d3
                d4=self.Squeeze_excitation_layer(d4, 64, 2, "rgb4")
                print('rgb_f4')
                print(d4.shape)
            d5 = self.add_layer('rgb_dense_layer.{}'.format(5), d4,is_training)
            #d5 = tf.layers.dropout(d5, rate=0.2, training=is_training)
            with tf.variable_scope('reduce5'):
                d5 = self.conv_k1_s1(d5,64)
                d5=d5+d4
                d5=self.Squeeze_excitation_layer(d5, 64, 2, "rgb5")
            #d5 = tf.nn.max_pool(d5,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
            with tf.variable_scope('reduce_channel'):
                #d5=self.conv_k1_s1(d5,100)
                print('rgb_f5')
                print(d5.shape)
                rgb_feature.append(d1)
                rgb_feature.append(d2)
                rgb_feature.append(d3)
                rgb_feature.append(d4)
                rgb_feature.append(d5)
                #rgb_SE.append(rgbexcitation1)
                #rgb_SE.append(rgbexcitation2)
                #rgb_SE.append(rgbexcitation3)
                #rgb_SE.append(rgbexcitation4)
                #rgb_SE.append(rgbexcitation5)
        return rgb_feature
    def small_fcn(self,img,is_training,name):
        """Small encoder/decoder FCN used to produce an image-guided filter map.

        NOTE(review): the scope name "c2" is opened twice below; because the
        conv wrappers use AUTO_REUSE inner scopes, c2 and c2_2 end up sharing
        convolution weights — confirm the second scope was not meant to be "c3".
        NOTE(review): the returned value resizes `c1` (the first encoder layer),
        so the whole c2..c4 encoder/decoder path is discarded — confirm whether
        `c4` was intended here.
        """
        with tf.variable_scope(name):
            shape = img.get_shape().as_list()
            in_channel = shape[3]
            H=shape[1]
            W=shape[2]
            with tf.variable_scope("c1"):
                c1= self.conv_k3_s1(img,in_channel)
                c1 = tf.nn.relu(c1)
                c1 = tf.layers.batch_normalization(c1,training = is_training)
                c1=self.Squeeze_excitation_layer(c1, in_channel, 2, "sp1")
            with tf.variable_scope("c2"):
                c2= self.conv_k3_s2(c1,in_channel)
                c2 = tf.nn.relu(c2)
                c2 = tf.layers.batch_normalization(c2,training = is_training)
                c2=self.Squeeze_excitation_layer(c2, in_channel, 2, "sp2")
                #c2 = tf.layers.dropout(c2, rate=0.5, training=is_training)
            # Duplicate scope name (see NOTE above).
            with tf.variable_scope("c2"):
                c2_2= self.conv_k3_s2(c2,in_channel)
                c2_2 = tf.nn.relu(c2_2)
                c2_2 = tf.layers.batch_normalization(c2_2,training = is_training)
                c2_2=self.Squeeze_excitation_layer(c2_2, in_channel, 2, "sp3")
                #c2 = tf.layers.dropout(c2, rate=0.5, training=is_training)
            with tf.variable_scope("dc1"):
                c3= self.deconv_k3_s2(c2_2,in_channel)
                c3 = tf.nn.relu(c3)
                c3 = tf.layers.batch_normalization(c3,training = is_training)
                c3=self.Squeeze_excitation_layer(c3, in_channel, 2, "dc1sp1")
                #c3 = tf.layers.dropout(c3, rate=0.5, training=is_training)
            with tf.variable_scope("dc2"):
                c4= self.deconv_k3_s2(c3,in_channel)
                c4 = tf.nn.relu(c4)
                c4 = tf.layers.batch_normalization(c4,training = is_training)
                c4=self.Squeeze_excitation_layer(c4, in_channel, 2, "dc1sp2")
            with tf.variable_scope("dc3"):
                c4= self.deconv_k3_s2(c4,in_channel)
                c4 = tf.nn.relu(c4)
                c4 = tf.layers.batch_normalization(c4,training = is_training)
                c4=self.Squeeze_excitation_layer(c4, in_channel, 2, "dc1sp3")
                #c4 = tf.layers.dropout(c4, rate=0.5, training=is_training)
            # Returns resized c1, not c4 (see NOTE above).
            fcn=tf.image.resize_images(c1,(H,W),0)
            return fcn
    def image_filter_block(self,rgb_f,is_training):
        """Build five image-guided filter maps from the five RGB feature maps,
        each stage's output accumulated onto the previous stage's (f2 = f2 + f1, ...).

        rgb_f: list of five feature maps from rgb_dense_block.
        Returns the list [f1..f5].
        NOTE(review): small_fcn names skip "filter1" (filter0 then filter2..5) —
        presumably a historical naming slip; harmless but worth confirming.
        """
        image_filter=[]
        with tf.variable_scope("image_guided_filter"):
            with tf.variable_scope("igf1"):
                f1=self.small_fcn(rgb_f[0],is_training,"filter0")
                print("imf1")
                print(f1.shape)
            with tf.variable_scope("igf2"):
                f2=self.small_fcn(rgb_f[1],is_training,"filter2")
                f2=f2+f1
                print("imf2")
                print(f2.shape)
            with tf.variable_scope("igf3"):
                f3=self.small_fcn(rgb_f[2],is_training,"filter3")
                f3=f3+f2
                print("imf3")
                print(f3.shape)
            with tf.variable_scope("igf4"):
                f4=self.small_fcn(rgb_f[3],is_training,"filter4")
                f4=f4+f3
                print("imf4")
                print(f4.shape)
            with tf.variable_scope("igf5"):
                f5=self.small_fcn(rgb_f[4],is_training,"filter5")
                f5=f5+f4
                print("imf5")
                print(f5.shape)
            image_filter.append(f1)
            image_filter.append(f2)
            image_filter.append(f3)
            image_filter.append(f4)
            image_filter.append(f5)
            return image_filter
    def sp_dense_block(self,sp_raw,image_guided,basic_channel,is_training,with_imgguided):
        """Sparse-depth feature extractor mirroring rgb_dense_block; when
        with_imgguided == 'yes', each stage is fused (concat + 3x3 conv) with
        the matching image-guided filter map.

        Returns a list of the five intermediate 64-channel feature maps.
        NOTE(review): the final fusion uses image_guided[3] — the same index as
        stage 4 — instead of image_guided[4]; confirm whether that is intended.
        """
        sp_feature=[]
        with tf.variable_scope('basic_channel'):
            # Project the raw sparse-depth input to `basic_channel` channels first.
            sp_raw = self.conv_k3_s1(sp_raw,basic_channel)
            sp_raw = tf.nn.relu(sp_raw)
            sp_raw = tf.layers.batch_normalization(sp_raw,training = is_training)
        with tf.variable_scope('sp_feature'):
            p1 = self.add_layer('sp_dense_layer.{}'.format(1), sp_raw,is_training)
            with tf.variable_scope('reduce1'):
                p1 = self.conv_k3_s1(p1,64)
                p1=p1+sp_raw
                p1=self.Squeeze_excitation_layer(p1, 64, 2, "sp1")
            if with_imgguided=='yes':
                with tf.variable_scope('temp1'):
                    temp=tf.concat([p1,image_guided[0]],3)
                    p1= self.conv_k3_s1(temp,64)
            print("sp1")
            print(p1.shape)
            p2 = self.add_layer('sp_dense_layer.{}'.format(2), p1,is_training)
            #p2 = tf.layers.dropout(p2, rate=0.2, training=is_training)
            with tf.variable_scope('reduce2'):
                p2 = self.conv_k3_s1(p2,64)
                p2=p2+p1
                p2=self.Squeeze_excitation_layer(p2, 64, 2, "sp2")
            if with_imgguided=='yes':
                with tf.variable_scope('temp2'):
                    temp=tf.concat([p2,image_guided[1]],3)
                    p2= self.conv_k3_s1(temp,64)
            print("sp2")
            print(p2.shape)
            p3 = self.add_layer('sp_dense_layer.{}'.format(3), p2,is_training)
            #p3 = tf.layers.dropout(p3, rate=0.2, training=is_training)
            with tf.variable_scope('reduce3'):
                p3 = self.conv_k3_s1(p3,64)
                p3=p3+p2
                p3=self.Squeeze_excitation_layer(p3, 64, 2, "sp3")
            if with_imgguided=='yes':
                with tf.variable_scope('temp3'):
                    temp=tf.concat([p3,image_guided[2]],3)
                    p3= self.conv_k3_s1(temp,64)
            print("sp3")
            print(p3.shape)
            p4 = self.add_layer('sp_dense_layer.{}'.format(4), p3,is_training)
            #p4 = tf.layers.dropout(p4, rate=0.2, training=is_training)
            with tf.variable_scope('reduce4'):
                p4 = self.conv_k3_s1(p4,64)
                p4=p4+p3
                p4=self.Squeeze_excitation_layer(p4, 64, 2, "sp4")
            if with_imgguided=='yes':
                with tf.variable_scope('temp4'):
                    temp=tf.concat([p4,image_guided[3]],3)
                    p4= self.conv_k3_s1(temp,64)
            print("sp4")
            print(p4.shape)
            p5 = self.add_layer('sp_dense_layer.{}'.format(5), p4,is_training)
            #p5 = tf.layers.dropout(p5, rate=0.2, training=is_training)
            with tf.variable_scope('reduce5'):
                p5 = self.conv_k3_s1(p5,64)
                p5=p5+p4
                p5=self.Squeeze_excitation_layer(p5, 64, 2, "sp5")
            #p5 = tf.nn.max_pool(p5,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
            with tf.variable_scope('reduce_channel'):
                #p5=self.conv_k1_s1(p5,100)
                if with_imgguided=='yes':
                    with tf.variable_scope('temp5'):
                        # image_guided[3] here, not [4] (see NOTE above).
                        temp=tf.concat([p5,image_guided[3]],3)
                        p5= self.conv_k3_s1(temp,64)
                        #p5 = tf.layers.dropout(p5, rate=0.2, training=is_training)
                print("sp5")
                print(p5.shape)
                sp_feature.append(p1)
                sp_feature.append(p2)
                sp_feature.append(p3)
                sp_feature.append(p4)
                sp_feature.append(p5)
        return sp_feature
    def U_net_block(self,fusion,is_training):
        """U-Net-style refinement head: paired conv(+residual) stages down
        (U_1..U_3_3), then deconv stages up (U_4..U_7_7) with skip concats from
        the encoder (u2_2 into u4_4, u1_1 into u5_5), ending in a 1x1 regression
        to a single-channel map resized to (IMG_H, IMG_W).

        NOTE(review): the skip-connection resizes below hard-code (48, 156) and
        (96, 312), which ties this block to one specific input resolution —
        confirm these match self.IMG_H/self.IMG_W for the intended dataset.
        """
        with tf.variable_scope('U_BLOCK',reuse=tf.AUTO_REUSE):
            with tf.variable_scope('U_1',reuse=tf.AUTO_REUSE):
                u1 = self.conv_k3_s1(fusion,512)
                u1 = tf.nn.relu(u1)
                #u1=self.Squeeze_excitation_layer(u1, 512, 2, "u1")
                u1 = tf.layers.batch_normalization(u1,training = is_training)
                #u1 = tf.layers.dropout(u1, rate=0.8, training=is_training)
                print("u_1")
                print(u1.shape)
            with tf.variable_scope('U_1_1',reuse=tf.AUTO_REUSE):
                u1_1 = self.conv_k3_s1(fusion,512)
                u1_1=u1_1+u1
                u1_1 = tf.nn.relu(u1_1)
                #u1_1=self.Squeeze_excitation_layer(u1_1, 512, 2, "u1_1")
                u1_1 = tf.layers.batch_normalization(u1_1,training = is_training)
                #u1_1 = tf.layers.dropout(u1_1, rate=0.8, training=is_training)
                print("u_1_1")
                print(u1_1.shape)
            with tf.variable_scope('U_2',reuse=tf.AUTO_REUSE):
                u2 = self.conv_k3_s2(u1_1,256)
                u2 = tf.nn.relu(u2)
                u2 = tf.layers.batch_normalization(u2,training = is_training)
                #u2=self.Squeeze_excitation_layer(u2, 256, 2, "u2")
                #u2 = tf.layers.dropout(u2, rate=0.8, training=is_training)
                print("u_2")
                print(u2.shape)
            with tf.variable_scope('U_2_2',reuse=tf.AUTO_REUSE):
                u2_2 = self.conv_k3_s1(u2,256)
                u2_2=u2_2+u2
                u2_2 = tf.nn.relu(u2_2)
                u2_2 = tf.layers.batch_normalization(u2_2,training = is_training)
                #u2_2=self.Squeeze_excitation_layer(u2_2, 256, 2, "u2_2")
                #u2_2 = tf.layers.dropout(u2_2, rate=0.8, training=is_training)
                print("u_2_2")
                print(u2_2.shape)
            with tf.variable_scope('U_3',reuse=tf.AUTO_REUSE):
                u3 = self.conv_k3_s2(u2_2,64)
                u3 = tf.nn.relu(u3)
                u3 = tf.layers.batch_normalization(u3,training = is_training)
                #u3=self.Squeeze_excitation_layer(u3, 64, 2, "u3")
                #u3 = tf.layers.dropout(u3, rate=0.8, training=is_training)
                print("u_3")
                print(u3.shape)
            with tf.variable_scope('U_3_3',reuse=tf.AUTO_REUSE):
                u3_3 = self.conv_k3_s1(u3,64)
                u3_3=u3_3+u3
                u3_3 = tf.nn.relu(u3_3)
                u3_3 = tf.layers.batch_normalization(u3_3,training = is_training)
                #u3_3=self.Squeeze_excitation_layer(u3_3, 64, 2, "u3_3")
                #u3_3 = tf.layers.dropout(u3_3, rate=0.8, training=is_training)
                print("u_3_3")
                print(u3_3.shape)
            with tf.variable_scope('U_4',reuse=tf.AUTO_REUSE):
                u4 = self.deconv_k3_s2(u3_3,32)
                u4 = tf.nn.relu(u4)
                u4 = tf.layers.batch_normalization(u4,training = is_training)
                #u4=self.Squeeze_excitation_layer(u4, 32, 2, "u3_3")
                #u4 = tf.layers.dropout(u4, rate=0.8, training=is_training)
                print("u_4")
                print(u4.shape)
            with tf.variable_scope('U_4_4',reuse=tf.AUTO_REUSE):
                u4_4 = self.conv_k3_s1(u4,32)
                u4_4=u4_4+u4
                u4_4 = tf.nn.relu(u4_4)
                u4_4 = tf.layers.batch_normalization(u4_4,training = is_training)
                #u4_4=self.Squeeze_excitation_layer(u4_4, 32, 2, "u4_4")
                #u4_4 = tf.layers.dropout(u4_4, rate=0.8, training=is_training)
                # Skip connection from the encoder (hard-coded size — see NOTE).
                u2_2=tf.image.resize_images(u2_2,(48,156),0)
                u4_4=tf.concat([u4_4, u2_2], 3)
                print("u_4_4")
                print(u4_4.shape)
            with tf.variable_scope('U_5',reuse=tf.AUTO_REUSE):
                u5 = self.deconv_k3_s2(u4_4,16)
                u5 = tf.nn.relu(u5)
                u5 = tf.layers.batch_normalization(u5,training = is_training)
                #u5=self.Squeeze_excitation_layer(u5, 16, 2, "u5")
                #u5 = tf.layers.dropout(u5, rate=0.8, training=is_training)
                print("u_5")
                print(u5.shape)
            with tf.variable_scope('U_5_5',reuse=tf.AUTO_REUSE):
                u5_5 = self.conv_k3_s1(u5,16)
                u5_5=u5_5+u5
                u5_5 = tf.nn.relu(u5_5)
                u5_5 = tf.layers.batch_normalization(u5_5,training = is_training)
                #u5_5 = tf.layers.dropout(u5_5, rate=0.8, training=is_training)
                #u5_5=self.Squeeze_excitation_layer(u5_5, 16, 2, "u5_5")
                # Skip connection from the encoder (hard-coded size — see NOTE).
                u1_1=tf.image.resize_images(u1_1,(96,312),0)
                u5_5=tf.concat([u5_5, u1_1], 3)
                print("u_5_5")
                print(u5_5.shape)
            with tf.variable_scope('U_6',reuse=tf.AUTO_REUSE):
                u6 = self.deconv_k3_s2(u5_5,8)
                u6 = tf.nn.relu(u6)
                u6 = tf.layers.batch_normalization(u6,training = is_training)
                #u6=self.Squeeze_excitation_layer(u6, 8, 2, "u6")
                #u6 = tf.layers.dropout(u6, rate=0.8, training=is_training)
                print("u_6")
                print(u6.shape)
            with tf.variable_scope('U_6_6',reuse=tf.AUTO_REUSE):
                u6_6 = self.conv_k3_s1(u6,8)
                u6_6=u6_6+u6
                u6_6 = tf.nn.relu(u6_6)
                u6_6 = tf.layers.batch_normalization(u6_6,training = is_training)
                #u6_6=self.Squeeze_excitation_layer(u6_6, 8, 2, "u6_6")
                #u6_6 = tf.layers.dropout(u6_6, rate=0.8, training=is_training)
                print("u_6_6")
                print(u6_6.shape)
            with tf.variable_scope('U_7',reuse=tf.AUTO_REUSE):
                u7 = self.deconv_k3_s2(u6_6,4)
                u7 = tf.nn.relu(u7)
                u7 = tf.layers.batch_normalization(u7,training = is_training)
                #u7=self.Squeeze_excitation_layer(u7, 4, 2, "u7")
                #u7 = tf.layers.dropout(u7, rate=0.8, training=is_training)
                print("u_7")
                print(u7.shape)
            with tf.variable_scope('U_7_7',reuse=tf.AUTO_REUSE):
                u7_7 = self.conv_k3_s1(u7,4)
                u7_7=u7_7+u7
                u7_7 = tf.nn.relu(u7_7)
                u7_7 = tf.layers.batch_normalization(u7_7,training = is_training)
                #u7_7=self.Squeeze_excitation_layer(u7_7, 4, 2, "u7_7")
                #u7_7 = tf.layers.dropout(u7_7, rate=0.8, training=is_training)
                print("u_7_7")
                print(u7_7.shape)
            with tf.variable_scope('regresion',reuse=tf.AUTO_REUSE):
                # Single-channel regression output at the network's target size.
                re = self.conv_k1_s1(u7_7,1)
                re = tf.nn.relu(re)
                re=tf.image.resize_images(re,(self.IMG_H,self.IMG_W),0)
                print("regresion")
                print(re.shape)
            return re
    def network(self,rgb,sp,is_training):
        """Full depth-completion graph.

        Pipeline: downsample rgb and sparse-depth inputs 4x; extract RGB dense
        features; build a per-stage RGB prediction head; derive image-guided
        filters from the RGB features; extract guided sparse-depth features and
        their prediction head; finally fuse the deepest RGB and depth features
        and regress the fused depth map.

        Returns (fusion_prediction, rgb_prediction, sp_prediction,
        image_guided_filters, se_block_activations), with the three predictions
        resized to (IMG_H, IMG_W).
        """
        print("input_rgb",rgb.shape)
        print("input_sp",sp.shape)
        print("exract feature from rgb images")
        # --- input downsampling (two stride-2 convs per modality) ---
        with tf.variable_scope('rgb',reuse=tf.AUTO_REUSE):
            rgb=self.conv_k3_s2(rgb,32)
            rgb = tf.nn.relu(rgb)
            rgb=tf.layers.batch_normalization(rgb,training = is_training)
            #rgb = tf.layers.dropout(rgb, rate=0.2, training=is_training)
            print("rgb downsample1")
            print(rgb.shape)
        with tf.variable_scope('rgb2',reuse=tf.AUTO_REUSE):
            rgb=self.conv_k3_s2(rgb,64)
            rgb = tf.nn.relu(rgb)
            rgb=tf.layers.batch_normalization(rgb,training = is_training)
            print("rgb downsample2")
            print(rgb.shape)
            #rgb = tf.layers.dropout(rgb, rate=0.2, training=is_training)
        with tf.variable_scope('sp',reuse=tf.AUTO_REUSE):
            sp=self.conv_k3_s2(sp,32)
            sp = tf.nn.relu(sp)
            sp=tf.layers.batch_normalization(sp,training = is_training)
            #sp = tf.layers.dropout(sp, rate=0.2, training=is_training)
        with tf.variable_scope('sp2',reuse=tf.AUTO_REUSE):
            sp=self.conv_k3_s2(sp,64)
            sp = tf.nn.relu(sp)
            sp=tf.layers.batch_normalization(sp,training = is_training)
            #sp = tf.layers.dropout(sp, rate=0.2, training=is_training)
        # --- RGB branch: dense features + auxiliary prediction head ---
        with tf.variable_scope('rgb_feature_exract',reuse=tf.AUTO_REUSE):
            rgb_feature=self.rgb_dense_block(rgb,64,is_training)
        with tf.variable_scope('rgb_predict',reuse=tf.AUTO_REUSE):
            with tf.variable_scope('deconv1',reuse=tf.AUTO_REUSE):
                rgb_predict=self.deconv_k3_s2(rgb_feature[4],128)
                rgb_predict = tf.nn.relu(rgb_predict)
                rgb_predict=tf.layers.batch_normalization(rgb_predict,training = is_training)
                print("upsample1")
                print(rgb_predict.shape)
            with tf.variable_scope('deconv2',reuse=tf.AUTO_REUSE):
                rgb_predict=self.deconv_k3_s2(rgb_predict,32)
                rgb_predict = tf.nn.relu(rgb_predict)
                rgb_predict=tf.layers.batch_normalization(rgb_predict,training = is_training)
                print("upsample2")
                print(rgb_predict.shape)
            with tf.variable_scope('rgb_regresion',reuse=tf.AUTO_REUSE):
                rgb_predict=self.conv_k1_s1(rgb_predict,1)
                rgb_predict = tf.nn.relu(rgb_predict)
                rgb_predict=tf.image.resize_images(rgb_predict,(self.IMG_H,self.IMG_W),0)
                print("rgb_predict")
                print(rgb_predict.shape)
        # --- image-guided filters feeding the sparse-depth branch ---
        with tf.variable_scope('image_guided_filter',reuse=tf.AUTO_REUSE):
            igf=self.image_filter_block(rgb_feature,is_training)
        print("exract feature from spare depth images")
        with tf.variable_scope('sp_feature_exract',reuse=tf.AUTO_REUSE):
            sp_feature=self.sp_dense_block(sp,igf,64,is_training,'yes')
        with tf.variable_scope('sp_predict',reuse=tf.AUTO_REUSE):
            with tf.variable_scope('spdeconv1',reuse=tf.AUTO_REUSE):
                sp_predict=self.deconv_k3_s2(sp_feature[4],128)
                sp_predict = tf.nn.relu(sp_predict)
                sp_predict=tf.layers.batch_normalization(sp_predict,training = is_training)
                print("spupsample1")
                print(sp_predict.shape)
            with tf.variable_scope('deconv2',reuse=tf.AUTO_REUSE):
                sp_predict=self.deconv_k3_s2(sp_predict,32)
                sp_predict = tf.nn.relu(sp_predict)
                sp_predict=tf.layers.batch_normalization(sp_predict,training = is_training)
                print("spupsample2")
                print(sp_predict.shape)
            with tf.variable_scope('sp_regresion',reuse=tf.AUTO_REUSE):
                sp_predict=self.conv_k1_s1(sp_predict,1)
                sp_predict = tf.nn.relu(sp_predict)
                sp_predict=tf.image.resize_images(sp_predict,(self.IMG_H,self.IMG_W),0)
                print("sp_predict")
                print(sp_predict.shape)
        # --- fusion head over the deepest features from both branches ---
        with tf.variable_scope('fusion_feature_exract',reuse=tf.AUTO_REUSE):
            fusion=tf.concat([sp_feature[4],rgb_feature[4]],axis=3)
            #fusion=self.sp_dense_block(sp,igf,64,is_training,'yes')
        with tf.variable_scope('fusion_predict',reuse=tf.AUTO_REUSE):
            with tf.variable_scope('fusiondeconv1',reuse=tf.AUTO_REUSE):
                fusion=self.deconv_k3_s2(fusion,128)
                fusion = tf.nn.relu(fusion)
                fusion=tf.layers.batch_normalization(fusion,training = is_training)
                print("fusionupsample1")
                print(fusion.shape)
            with tf.variable_scope('fusiondeconv2',reuse=tf.AUTO_REUSE):
                fusion2=self.conv_k3_s1(fusion,128)
                fusion2=fusion2+fusion
                fusion2 = tf.nn.relu(fusion2)
                fusion2=tf.layers.batch_normalization(fusion2,training = is_training)
                print("fusionupsample2")
                print(fusion2.shape)
            with tf.variable_scope('fusiondeconv3',reuse=tf.AUTO_REUSE):
                fusion3=self.deconv_k3_s2(fusion2,64)
                fusion3 = tf.nn.relu(fusion3)
                fusion3=tf.layers.batch_normalization(fusion3,training = is_training)
                print("fusionupsample2")
                print(fusion3.shape)
            with tf.variable_scope('fusiondeconv4',reuse=tf.AUTO_REUSE):
                fusion4=self.conv_k3_s1(fusion3,64)
                fusion4=fusion4+fusion3
                fusion4 = tf.nn.relu(fusion4)
                fusion4=tf.layers.batch_normalization(fusion4,training = is_training)
                print("fusionupsample2")
                print(fusion4.shape)
            with tf.variable_scope('fusion_regresion1',reuse=tf.AUTO_REUSE):
                fusion5=self.conv_k3_s1(fusion4,32)
                fusion5 = tf.nn.relu(fusion5)
                print("fusion_predict")
                print(fusion5.shape)
            with tf.variable_scope('fusion_regresion2',reuse=tf.AUTO_REUSE):
                fusion6=self.conv_k3_s1(fusion5,8)
                fusion6 = tf.nn.relu(fusion6)
                print("fusion_predict2")
                print(fusion6.shape)
            with tf.variable_scope('fusion_regresion3',reuse=tf.AUTO_REUSE):
                fusion7=self.conv_k1_s1(fusion6,1)
                fusion7 = tf.nn.relu(fusion7)
                fusion7=tf.image.resize_images(fusion7,(self.IMG_H,self.IMG_W),0)
                print("fusion_predict")
                print(fusion7.shape)
        return fusion7,rgb_predict,sp_predict,igf,self.se_block
def network2(self,x,sp,is_training):
print("input",x.shape)
with tf.variable_scope('layer1',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb',reuse=tf.AUTO_REUSE):
x=self.basic_conv_block(x,32,3,2,"same",is_training)
#ttt=self.local_feature_exact(x)
#print(ttt)
with tf.variable_scope('sp',reuse=tf.AUTO_REUSE):
sp=self.basic_conv_block(sp,32,3,2,"same",is_training)
print("layer1",x.shape)
with tf.variable_scope('Convlayer2',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb2',reuse=tf.AUTO_REUSE):
x=self.conv_res(x,32,2,is_training)
with tf.variable_scope('sp2',reuse=tf.AUTO_REUSE):
sp=self.conv_res(sp,32,2,is_training)
print("Reslayer2",x.shape)
with tf.variable_scope('Convlayer3',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb3',reuse=tf.AUTO_REUSE):
x=self.conv_res(x,64,3,is_training)
with tf.variable_scope('sp3',reuse=tf.AUTO_REUSE):
sp=self.conv_res(sp,64,3,is_training)
print("Reslayer3",x.shape)
with tf.variable_scope('Convlayer4',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb4',reuse=tf.AUTO_REUSE):
x=self.conv_res(x,128,3,is_training)
with tf.variable_scope('sp4',reuse=tf.AUTO_REUSE):
sp=self.conv_res(sp,128,3,is_training)
print("Reslayer4",x.shape)
with tf.variable_scope('Convlayer5',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb5',reuse=tf.AUTO_REUSE):
x=self.conv_res(x,256,3,is_training)
with tf.variable_scope('sp5',reuse=tf.AUTO_REUSE):
sp=self.conv_res(sp,256,3,is_training)
print("Reslayer5",x.shape)
with tf.variable_scope('Convlayer6',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb6',reuse=tf.AUTO_REUSE):
x=self.conv_res(x,512,3,is_training)
with tf.variable_scope('sp6',reuse=tf.AUTO_REUSE):
sp=self.conv_res(sp,512,3,is_training)
print("Reslayer6",x.shape)
with tf.variable_scope('Convlayer66',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb66',reuse=tf.AUTO_REUSE):
x=self.conv_res(x,512,3,is_training)
with tf.variable_scope('sp66',reuse=tf.AUTO_REUSE):
sp=self.conv_res(sp,512,3,is_training)
print("Reslayer6",x.shape)
with tf.variable_scope('DeConvlayer7',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb66',reuse=tf.AUTO_REUSE):
x=self.deconv_res(x,256,3,is_training)
with tf.variable_scope('sp66',reuse=tf.AUTO_REUSE):
sp=self.deconv_res(sp,256,3,is_training)
print("Reslayer7",x.shape)
with tf.variable_scope('DeConvlayer8',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb7',reuse=tf.AUTO_REUSE):
x=self.deconv_res(x,128,3,is_training)
with tf.variable_scope('sp7',reuse=tf.AUTO_REUSE):
sp=self.deconv_res(sp,128,3,is_training)
print("Reslayer8",x.shape)
with tf.variable_scope('DeConvlayer9',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb',reuse=tf.AUTO_REUSE):
x=self.deconv_res(x,64,3,is_training)
with tf.variable_scope('sp',reuse=tf.AUTO_REUSE):
sp=self.deconv_res(sp,64,3,is_training)
print("Reslayer9",x.shape)
with tf.variable_scope('DeConvlayer10',reuse=tf.AUTO_REUSE) as scope:
with tf.variable_scope('rgb',reuse=tf.AUTO_REUSE):
x=self.deconv_res(x,32,3,is_training)
with tf.variable_scope('sp',reuse=tf.AUTO_REUSE):
sp=self.deconv_res(sp,32,3,is_training)
xsp=tf.concat([x,sp],3)
print("Reslayer10",x.shape)
with tf.variable_scope('output',reuse=tf.AUTO_REUSE):
xsp=self.deconv_k3_s2(xsp,1)
print("output",xsp.shape)
self.xsp=xsp
return xsp
def network3(self,rgb,sp,is_training):
print("input_rgb",rgb.shape)
print("input_sp",sp.shape)
print("exract feature from rgb images")
with tf.variable_scope('rgb',reuse=tf.AUTO_REUSE):
rgb=self.conv_k3_s2(rgb,32)
rgb = tf.nn.relu(rgb)
rgb=tf.layers.batch_normalization(rgb,training = is_training)
rgb = tf.layers.dropout(rgb, rate=0.8, training=is_training)
print("rgb downsample1")
print(rgb.shape)
with tf.variable_scope('rgb2',reuse=tf.AUTO_REUSE):
rgb=self.conv_k3_s2(rgb,32)
rgb = tf.nn.relu(rgb)
rgb=tf.layers.batch_normalization(rgb,training = is_training)
print("rgb downsample2")
print(rgb.shape)
rgb = tf.layers.dropout(rgb, rate=0.8, training=is_training)
with tf.variable_scope('sp',reuse=tf.AUTO_REUSE):
sp=self.conv_k3_s2(sp,32)
sp = tf.nn.relu(sp)
sp=tf.layers.batch_normalization(sp,training = is_training)
sp = tf.layers.dropout(sp, rate=0.8, training=is_training)
with tf.variable_scope('sp2',reuse=tf.AUTO_REUSE):
sp=self.conv_k3_s2(sp,32)
sp = tf.nn.relu(sp)
sp=tf.layers.batch_normalization(sp,training = is_training)
with tf.variable_scope('rgb_feature_exract',reuse=tf.AUTO_REUSE):
rgb_feature=self.rgb_dense_block(rgb,64,is_training)
with tf.variable_scope('rgb_predict',reuse=tf.AUTO_REUSE):
with tf.variable_scope('deconv1',reuse=tf.AUTO_REUSE):
rgb_predict=self.deconv_k3_s2(rgb_feature[4],128)
rgb_predict = tf.nn.relu(rgb_predict)
rgb_predict=tf.layers.batch_normalization(rgb_predict,training = is_training)
print("upsample1")
print(rgb_predict.shape)
with tf.variable_scope('deconv2',reuse=tf.AUTO_REUSE):
rgb_predict=self.deconv_k3_s2(rgb_predict,32)
rgb_predict = tf.nn.relu(rgb_predict)
rgb_predict=tf.layers.batch_normalization(rgb_predict,training = is_training)
print("upsample2")
print(rgb_predict.shape)
with tf.variable_scope('rgb_regresion',reuse=tf.AUTO_REUSE):
rgb_predict=self.conv_k1_s1(rgb_predict,1)
rgb_predict = tf.nn.relu(rgb_predict)
rgb_predict=tf.image.resize_images(rgb_predict,(self.IMG_H,self.IMG_W),0)
print("rgb_predict")
print(rgb_predict.shape)
with tf.variable_scope('image_guided_filter',reuse=tf.AUTO_REUSE):
igf=self.image_filter_block(rgb_feature,is_training)
print("exract feature from spare depth images")
with tf.variable_scope('sp_feature_exract',reuse=tf.AUTO_REUSE):
sp_feature=self.sp_dense_block(sp,igf,64,is_training,'yes')
with tf.variable_scope('sp_predict',reuse=tf.AUTO_REUSE):
with tf.variable_scope('spdeconv1',reuse=tf.AUTO_REUSE):
sp_predict=self.deconv_k3_s2(sp_feature[4],128)
sp_predict = tf.nn.relu(sp_predict)
sp_predict=tf.layers.batch_normalization(sp_predict,training = is_training)
print("spupsample1")
print(sp_predict.shape)
with tf.variable_scope('deconv2',reuse=tf.AUTO_REUSE):
sp_predict=self.deconv_k3_s2(sp_predict,32)
sp_predict = tf.nn.relu(sp_predict)
sp_predict=tf.layers.batch_normalization(sp_predict,training = is_training)
print("spupsample2")
print(sp_predict.shape)
with tf.variable_scope('sp_regresion',reuse=tf.AUTO_REUSE):
sp_predict=self.conv_k1_s1(sp_predict,1)
sp_predict = tf.nn.relu(sp_predict)
sp_predict=tf.image.resize_images(sp_predict,(self.IMG_H,self.IMG_W),0)
print("sp_predict")
print(sp_predict.shape)
with tf.variable_scope('feature_fusion',reuse=tf.AUTO_REUSE):
fu=tf.concat([rgb_feature[4], sp_feature[4]], 3)
print("fusion shape")
print(fu.shape)
fu=self.Squeeze_excitation_layer(fu, 128, 2, "SE_BLOCK")
print("fusion SE")
print(fu.shape)
print("exract feature from spare depth images")
with tf.variable_scope('depth_predict',reuse=tf.AUTO_REUSE):
depth=self.U_net_block(fu,is_training)
return depth,rgb_predict,sp_predict
def loss(self,predictrgb,predictsp,predictfusion,groundtruth):
self.loss=tf.reduce_mean(tf.abs(tf.subtract(predictrgb,groundtruth)))+tf.reduce_mean(tf.abs(tf.subtract(predictsp,groundtruth)))+tf.reduce_mean(tf.abs(tf.subtract(predictfusion,groundtruth)))
return self.loss
def MAE_loss(self,predict,groundtruth):
maeloss=tf.reduce_mean(tf.abs(tf.subtract(predict,groundtruth)))
return maeloss
def iMAE_loss(self,predict,groundtruth):
#predict=tf.multiply(predict,tf.constant(0.000001))
#groundtruth=tf.multiply(groundtruth,tf.constant(0.000001))
#tf.where(condition,x=None,y=None,name=None)
predict2=tf.cast(predict,dtype=tf.float64)
predict2=predict2/1000000.0
groundtruth2=tf.cast(groundtruth,dtype=tf.float64)
groundtruth2=groundtruth2/1000000.0
imaeloss=tf.reduce_mean(tf.abs((groundtruth2-predict2)/(groundtruth2*predict2)))
return imaeloss
def RMSE_loss(self,predict,groundtruth):
rmseloss=tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(predict,groundtruth))))
return rmseloss
def iRMSE_loss(self,predict,groundtruth):
predict2=tf.cast(predict,dtype=tf.float64)
predict2=predict2/1000000.0
groundtruth2=tf.cast(groundtruth,dtype=tf.float64)
groundtruth2=groundtruth2/1000000.0
rmseloss=tf.sqrt(tf.reduce_mean(tf.square(tf.divide(tf.subtract(groundtruth2,predict2),tf.multiply(predict2,groundtruth2)))))
#rmseloss2=rmseloss/self.IMG_W=arg.IMG_W/self.IMG_W=arg.IMG_H/self.BATCH_SIZE
return rmseloss
def test_loss(self,predict,groundtruth):
test_loss=tf.reduce_mean(tf.abs(tf.subtract(predict,groundtruth)))
return test_loss
def optimize(self,learning_rate):
regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
total_loss=regularization_loss+self.loss
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(total_loss,global_step=self.global_step)
return train_opt,regularization_loss
# tf.train.GradientDescentOptimizer AdamOptimizer
if __name__ == '__main__':
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='depth completion')
parser.add_argument('--rgb_path',dest='rgb_path',default="E:/program/self-supervised-depth-completion-master/data/data_rgb/train/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data")
parser.add_argument('--spare_depth_path',dest='spare_depth_path',default="E:/program/self-supervised-depth-completion-master/data/data_depth_velodyne/train/2011_09_26_drive_0001_sync/proj_depth/velodyne_raw/image_02")
parser.add_argument('--denth_depth_path',dest='denth_depth_path',default="E:/program/self-supervised-depth-completion-master/data/data_depth_annotated/train/2011_09_26_drive_0001_sync/proj_depth/groundtruth/image_02")
parser.add_argument('--BATCH_SIZE',dest='BATCH_SIZE',default=2)
parser.add_argument('--IMG_H',dest='IMG_H',default=300)
parser.add_argument('--IMG_W',dest='IMG_W',default=300)
args = parser.parse_args()
test_rgb='./005.png'
test_rgb_date=cv2.imread(test_rgb)
#print(test_rgb_date)
image_rgb=tf.convert_to_tensor(test_rgb_date)
image_rgb=tf.to_float(image_rgb, name='ToFloat')
image_rgb=tf.expand_dims(image_rgb,0)
model=Model(args)
testingmode = tf.constant(False,dtype=tf.bool)
trainingmode = tf.constant(False,dtype=tf.bool)
output=model.network(image_rgb,image_rgb,testingmode)
sess= tf.Session()
sess.run(tf.global_variables_initializer())
o=sess.run(output)
print("#################################")
print(o.shape)
plt.imshow(o[0,:,:,0])
plt.axis("off")
plt.show()
| [
"tensorflow.image.resize_images",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.multiply",
"tensorflow.truncated_normal_initializer",
"tensorflow.cast",
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.nn.sigmoid",
"tensorflow.concat",
"tensorflo... | [((50538, 50593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""depth completion"""'}), "(description='depth completion')\n", (50561, 50593), False, 'import argparse\n'), ((51496, 51516), 'cv2.imread', 'cv2.imread', (['test_rgb'], {}), '(test_rgb)\n', (51506, 51516), False, 'import cv2\n'), ((51553, 51588), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['test_rgb_date'], {}), '(test_rgb_date)\n', (51573, 51588), True, 'import tensorflow as tf\n'), ((51601, 51639), 'tensorflow.to_float', 'tf.to_float', (['image_rgb'], {'name': '"""ToFloat"""'}), "(image_rgb, name='ToFloat')\n", (51612, 51639), True, 'import tensorflow as tf\n'), ((51652, 51680), 'tensorflow.expand_dims', 'tf.expand_dims', (['image_rgb', '(0)'], {}), '(image_rgb, 0)\n', (51666, 51680), True, 'import tensorflow as tf\n'), ((51718, 51751), 'tensorflow.constant', 'tf.constant', (['(False)'], {'dtype': 'tf.bool'}), '(False, dtype=tf.bool)\n', (51729, 51751), True, 'import tensorflow as tf\n'), ((51768, 51801), 'tensorflow.constant', 'tf.constant', (['(False)'], {'dtype': 'tf.bool'}), '(False, dtype=tf.bool)\n', (51779, 51801), True, 'import tensorflow as tf\n'), ((51867, 51879), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (51877, 51879), True, 'import tensorflow as tf\n'), ((52011, 52036), 'matplotlib.pyplot.imshow', 'plt.imshow', (['o[0, :, :, 0]'], {}), '(o[0, :, :, 0])\n', (52021, 52036), True, 'import matplotlib.pyplot as plt\n'), ((52036, 52051), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (52044, 52051), True, 'import matplotlib.pyplot as plt\n'), ((52054, 52064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (52062, 52064), True, 'import matplotlib.pyplot as plt\n'), ((240, 272), 'tensorflow.constant', 'tf.constant', (['(True)'], {'dtype': 'tf.bool'}), '(True, dtype=tf.bool)\n', (251, 272), True, 'import tensorflow as tf\n'), ((294, 327), 'tensorflow.constant', 'tf.constant', (['(False)'], 
{'dtype': 'tf.bool'}), '(False, dtype=tf.bool)\n', (305, 327), True, 'import tensorflow as tf\n'), ((353, 384), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (364, 384), True, 'import tensorflow as tf\n'), ((958, 1012), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x'], {'training': 'is_training'}), '(x, training=is_training)\n', (987, 1012), True, 'import tensorflow as tf\n'), ((1021, 1034), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1031, 1034), True, 'import tensorflow as tf\n'), ((1504, 1558), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x'], {'training': 'is_training'}), '(x, training=is_training)\n', (1533, 1558), True, 'import tensorflow as tf\n'), ((1567, 1580), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1577, 1580), True, 'import tensorflow as tf\n'), ((5498, 5511), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5508, 5511), True, 'import tensorflow as tf\n'), ((5546, 5562), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (5559, 5562), True, 'import tensorflow as tf\n'), ((6250, 6295), 'tflearn.layers.conv.global_avg_pool', 'global_avg_pool', (['x'], {'name': '"""Global_avg_pooling"""'}), "(x, name='Global_avg_pooling')\n", (6265, 6295), False, 'from tflearn.layers.conv import global_avg_pool\n'), ((8049, 8151), 'tensorflow.nn.max_pool_with_argmax_and_mask', 'tf.nn.max_pool_with_argmax_and_mask', (['inp'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(inp, ksize=[1, k, k, 1], strides=[1, k,\n k, 1], padding='SAME')\n", (8084, 8151), True, 'import tensorflow as tf\n'), ((8217, 8321), 'tensorflow.nn.max_unpool', 'tf.nn.max_unpool', (['inp', 'argmax', 'argmax_mask'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(inp, argmax, argmax_mask, ksize=[1, k, k, 1], strides=[1,\n k, k, 1], padding='SAME')\n", (8233, 8321), 
True, 'import tensorflow as tf\n'), ((48939, 48973), 'tensorflow.cast', 'tf.cast', (['predict'], {'dtype': 'tf.float64'}), '(predict, dtype=tf.float64)\n', (48946, 48973), True, 'import tensorflow as tf\n'), ((49030, 49068), 'tensorflow.cast', 'tf.cast', (['groundtruth'], {'dtype': 'tf.float64'}), '(groundtruth, dtype=tf.float64)\n', (49037, 49068), True, 'import tensorflow as tf\n'), ((49421, 49455), 'tensorflow.cast', 'tf.cast', (['predict'], {'dtype': 'tf.float64'}), '(predict, dtype=tf.float64)\n', (49428, 49455), True, 'import tensorflow as tf\n'), ((49502, 49540), 'tensorflow.cast', 'tf.cast', (['groundtruth'], {'dtype': 'tf.float64'}), '(groundtruth, dtype=tf.float64)\n', (49509, 49540), True, 'import tensorflow as tf\n'), ((50137, 50179), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (50154, 50179), True, 'import tensorflow as tf\n'), ((51891, 51924), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (51922, 51924), True, 'import tensorflow as tf\n'), ((1641, 1693), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv_k3_s2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('conv_k3_s2', reuse=tf.AUTO_REUSE)\n", (1658, 1693), True, 'import tensorflow as tf\n'), ((3249, 3303), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv_k3_s2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv_k3_s2', reuse=tf.AUTO_REUSE)\n", (3266, 3303), True, 'import tensorflow as tf\n'), ((5093, 5148), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x1'], {'training': 'is_training'}), '(x1, training=is_training)\n', (5122, 5148), True, 'import tensorflow as tf\n'), ((5159, 5173), 'tensorflow.nn.relu', 'tf.nn.relu', (['x1'], {}), '(x1)\n', (5169, 5173), True, 'import tensorflow as tf\n'), ((5333, 5388), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x2'], {'training': 'is_training'}), '(x2, 
training=is_training)\n', (5362, 5388), True, 'import tensorflow as tf\n'), ((5399, 5413), 'tensorflow.nn.relu', 'tf.nn.relu', (['x2'], {}), '(x2)\n', (5409, 5413), True, 'import tensorflow as tf\n'), ((5815, 5870), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x1'], {'training': 'is_training'}), '(x1, training=is_training)\n', (5844, 5870), True, 'import tensorflow as tf\n'), ((5881, 5895), 'tensorflow.nn.relu', 'tf.nn.relu', (['x1'], {}), '(x1)\n', (5891, 5895), True, 'import tensorflow as tf\n'), ((6061, 6116), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x2'], {'training': 'is_training'}), '(x2, training=is_training)\n', (6090, 6116), True, 'import tensorflow as tf\n'), ((6127, 6141), 'tensorflow.nn.relu', 'tf.nn.relu', (['x2'], {}), '(x2)\n', (6137, 6141), True, 'import tensorflow as tf\n'), ((6379, 6404), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (6392, 6404), True, 'import tensorflow as tf\n'), ((6418, 6471), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'x', 'use_bias': '(True)', 'units': 'units'}), '(inputs=x, use_bias=True, units=units)\n', (6433, 6471), True, 'import tensorflow as tf\n'), ((6556, 6581), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (6569, 6581), True, 'import tensorflow as tf\n'), ((6966, 7009), 'tensorflow.reshape', 'tf.reshape', (['excitation', '[-1, 1, 1, out_dim]'], {}), '(excitation, [-1, 1, 1, out_dim])\n', (6976, 7009), True, 'import tensorflow as tf\n'), ((7241, 7264), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (7258, 7264), True, 'import tensorflow as tf\n'), ((8445, 8498), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name + 'res1')"], {'reuse': 'tf.AUTO_REUSE'}), "(name + 'res1', reuse=tf.AUTO_REUSE)\n", (8462, 8498), True, 'import tensorflow as tf\n'), ((8599, 8612), 'tensorflow.nn.relu', 'tf.nn.relu', (['c'], {}), '(c)\n', (8609, 
8612), True, 'import tensorflow as tf\n'), ((8638, 8692), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c'], {'training': 'is_training'}), '(c, training=is_training)\n', (8667, 8692), True, 'import tensorflow as tf\n'), ((8805, 8858), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name + 'res2')"], {'reuse': 'tf.AUTO_REUSE'}), "(name + 'res2', reuse=tf.AUTO_REUSE)\n", (8822, 8858), True, 'import tensorflow as tf\n'), ((8987, 9000), 'tensorflow.nn.relu', 'tf.nn.relu', (['c'], {}), '(c)\n', (8997, 9000), True, 'import tensorflow as tf\n'), ((9026, 9080), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c'], {'training': 'is_training'}), '(c, training=is_training)\n', (9055, 9080), True, 'import tensorflow as tf\n'), ((9186, 9206), 'tensorflow.concat', 'tf.concat', (['[c, l]', '(3)'], {}), '([c, l], 3)\n', (9195, 9206), True, 'import tensorflow as tf\n'), ((9380, 9414), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""basic_channel"""'], {}), "('basic_channel')\n", (9397, 9414), True, 'import tensorflow as tf\n'), ((9513, 9532), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_raw'], {}), '(rgb_raw)\n', (9523, 9532), True, 'import tensorflow as tf\n'), ((9562, 9622), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb_raw'], {'training': 'is_training'}), '(rgb_raw, training=is_training)\n', (9591, 9622), True, 'import tensorflow as tf\n'), ((9644, 9676), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb_feature"""'], {}), "('rgb_feature')\n", (9661, 9676), True, 'import tensorflow as tf\n'), ((9791, 9819), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce1"""'], {}), "('reduce1')\n", (9808, 9819), True, 'import tensorflow as tf\n'), ((10277, 10305), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce2"""'], {}), "('reduce2')\n", (10294, 10305), True, 'import tensorflow as tf\n'), ((10738, 10766), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""reduce3"""'], {}), "('reduce3')\n", (10755, 10766), True, 'import tensorflow as tf\n'), ((11177, 11205), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce4"""'], {}), "('reduce4')\n", (11194, 11205), True, 'import tensorflow as tf\n'), ((11616, 11644), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce5"""'], {}), "('reduce5')\n", (11633, 11644), True, 'import tensorflow as tf\n'), ((11957, 11992), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce_channel"""'], {}), "('reduce_channel')\n", (11974, 11992), True, 'import tensorflow as tf\n'), ((12733, 12756), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (12750, 12756), True, 'import tensorflow as tf\n'), ((15642, 15682), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""image_guided_filter"""'], {}), "('image_guided_filter')\n", (15659, 15682), True, 'import tensorflow as tf\n'), ((17266, 17300), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""basic_channel"""'], {}), "('basic_channel')\n", (17283, 17300), True, 'import tensorflow as tf\n'), ((17396, 17414), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_raw'], {}), '(sp_raw)\n', (17406, 17414), True, 'import tensorflow as tf\n'), ((17443, 17502), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp_raw'], {'training': 'is_training'}), '(sp_raw, training=is_training)\n', (17472, 17502), True, 'import tensorflow as tf\n'), ((17525, 17556), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp_feature"""'], {}), "('sp_feature')\n", (17542, 17556), True, 'import tensorflow as tf\n'), ((17675, 17703), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce1"""'], {}), "('reduce1')\n", (17692, 17703), True, 'import tensorflow as tf\n'), ((18396, 18424), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce2"""'], {}), "('reduce2')\n", (18413, 18424), True, 'import tensorflow as tf\n'), ((19113, 19141), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce3"""'], {}), "('reduce3')\n", (19130, 19141), True, 'import tensorflow as tf\n'), ((19830, 19858), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce4"""'], {}), "('reduce4')\n", (19847, 19858), True, 'import tensorflow as tf\n'), ((20547, 20575), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce5"""'], {}), "('reduce5')\n", (20564, 20575), True, 'import tensorflow as tf\n'), ((20944, 20979), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reduce_channel"""'], {}), "('reduce_channel')\n", (20961, 20979), True, 'import tensorflow as tf\n'), ((21798, 21847), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_BLOCK"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_BLOCK', reuse=tf.AUTO_REUSE)\n", (21815, 21847), True, 'import tensorflow as tf\n'), ((31683, 31728), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb', reuse=tf.AUTO_REUSE)\n", (31700, 31728), True, 'import tensorflow as tf\n'), ((31805, 31820), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb'], {}), '(rgb)\n', (31815, 31820), True, 'import tensorflow as tf\n'), ((31846, 31902), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb'], {'training': 'is_training'}), '(rgb, training=is_training)\n', (31875, 31902), True, 'import tensorflow as tf\n'), ((32079, 32125), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb2', reuse=tf.AUTO_REUSE)\n", (32096, 32125), True, 'import tensorflow as tf\n'), ((32202, 32217), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb'], {}), '(rgb)\n', (32212, 32217), True, 'import tensorflow as tf\n'), ((32243, 32299), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb'], {'training': 'is_training'}), '(rgb, training=is_training)\n', (32272, 32299), True, 'import tensorflow as tf\n'), ((32476, 32520), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""sp"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp', reuse=tf.AUTO_REUSE)\n", (32493, 32520), True, 'import tensorflow as tf\n'), ((32594, 32608), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp'], {}), '(sp)\n', (32604, 32608), True, 'import tensorflow as tf\n'), ((32633, 32688), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp'], {'training': 'is_training'}), '(sp, training=is_training)\n', (32662, 32688), True, 'import tensorflow as tf\n'), ((32779, 32824), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp2', reuse=tf.AUTO_REUSE)\n", (32796, 32824), True, 'import tensorflow as tf\n'), ((32898, 32912), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp'], {}), '(sp)\n', (32908, 32912), True, 'import tensorflow as tf\n'), ((32937, 32992), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp'], {'training': 'is_training'}), '(sp, training=is_training)\n', (32966, 32992), True, 'import tensorflow as tf\n'), ((33083, 33143), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb_feature_exract"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb_feature_exract', reuse=tf.AUTO_REUSE)\n", (33100, 33143), True, 'import tensorflow as tf\n'), ((33248, 33301), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb_predict"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb_predict', reuse=tf.AUTO_REUSE)\n", (33265, 33301), True, 'import tensorflow as tf\n'), ((34578, 34639), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""image_guided_filter"""'], {'reuse': 'tf.AUTO_REUSE'}), "('image_guided_filter', reuse=tf.AUTO_REUSE)\n", (34595, 34639), True, 'import tensorflow as tf\n'), ((34779, 34838), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp_feature_exract"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp_feature_exract', reuse=tf.AUTO_REUSE)\n", (34796, 34838), True, 'import tensorflow as tf\n'), ((34950, 35002), 'tensorflow.variable_scope', 'tf.variable_scope', 
(['"""sp_predict"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp_predict', reuse=tf.AUTO_REUSE)\n", (34967, 35002), True, 'import tensorflow as tf\n'), ((36246, 36309), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusion_feature_exract"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusion_feature_exract', reuse=tf.AUTO_REUSE)\n", (36263, 36309), True, 'import tensorflow as tf\n'), ((36338, 36388), 'tensorflow.concat', 'tf.concat', (['[sp_feature[4], rgb_feature[4]]'], {'axis': '(3)'}), '([sp_feature[4], rgb_feature[4]], axis=3)\n', (36347, 36388), True, 'import tensorflow as tf\n'), ((36482, 36538), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusion_predict"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusion_predict', reuse=tf.AUTO_REUSE)\n", (36499, 36538), True, 'import tensorflow as tf\n'), ((39501, 39549), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""layer1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('layer1', reuse=tf.AUTO_REUSE)\n", (39518, 39549), True, 'import tensorflow as tf\n'), ((39880, 39932), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Convlayer2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Convlayer2', reuse=tf.AUTO_REUSE)\n", (39897, 39932), True, 'import tensorflow as tf\n'), ((40179, 40231), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Convlayer3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Convlayer3', reuse=tf.AUTO_REUSE)\n", (40196, 40231), True, 'import tensorflow as tf\n'), ((40476, 40528), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Convlayer4"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Convlayer4', reuse=tf.AUTO_REUSE)\n", (40493, 40528), True, 'import tensorflow as tf\n'), ((40777, 40829), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Convlayer5"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Convlayer5', reuse=tf.AUTO_REUSE)\n", (40794, 40829), True, 'import tensorflow as tf\n'), ((41078, 41130), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Convlayer6"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Convlayer6', 
reuse=tf.AUTO_REUSE)\n", (41095, 41130), True, 'import tensorflow as tf\n'), ((41381, 41434), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Convlayer66"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Convlayer66', reuse=tf.AUTO_REUSE)\n", (41398, 41434), True, 'import tensorflow as tf\n'), ((41685, 41739), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DeConvlayer7"""'], {'reuse': 'tf.AUTO_REUSE'}), "('DeConvlayer7', reuse=tf.AUTO_REUSE)\n", (41702, 41739), True, 'import tensorflow as tf\n'), ((41992, 42046), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DeConvlayer8"""'], {'reuse': 'tf.AUTO_REUSE'}), "('DeConvlayer8', reuse=tf.AUTO_REUSE)\n", (42009, 42046), True, 'import tensorflow as tf\n'), ((42299, 42353), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DeConvlayer9"""'], {'reuse': 'tf.AUTO_REUSE'}), "('DeConvlayer9', reuse=tf.AUTO_REUSE)\n", (42316, 42353), True, 'import tensorflow as tf\n'), ((42600, 42655), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DeConvlayer10"""'], {'reuse': 'tf.AUTO_REUSE'}), "('DeConvlayer10', reuse=tf.AUTO_REUSE)\n", (42617, 42655), True, 'import tensorflow as tf\n'), ((42872, 42893), 'tensorflow.concat', 'tf.concat', (['[x, sp]', '(3)'], {}), '([x, sp], 3)\n', (42881, 42893), True, 'import tensorflow as tf\n'), ((42933, 42981), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {'reuse': 'tf.AUTO_REUSE'}), "('output', reuse=tf.AUTO_REUSE)\n", (42950, 42981), True, 'import tensorflow as tf\n'), ((43238, 43283), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb', reuse=tf.AUTO_REUSE)\n", (43255, 43283), True, 'import tensorflow as tf\n'), ((43360, 43375), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb'], {}), '(rgb)\n', (43370, 43375), True, 'import tensorflow as tf\n'), ((43401, 43457), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb'], {'training': 'is_training'}), '(rgb, 
training=is_training)\n', (43430, 43457), True, 'import tensorflow as tf\n'), ((43486, 43540), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['rgb'], {'rate': '(0.8)', 'training': 'is_training'}), '(rgb, rate=0.8, training=is_training)\n', (43503, 43540), True, 'import tensorflow as tf\n'), ((43633, 43679), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb2', reuse=tf.AUTO_REUSE)\n", (43650, 43679), True, 'import tensorflow as tf\n'), ((43756, 43771), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb'], {}), '(rgb)\n', (43766, 43771), True, 'import tensorflow as tf\n'), ((43797, 43853), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb'], {'training': 'is_training'}), '(rgb, training=is_training)\n', (43826, 43853), True, 'import tensorflow as tf\n'), ((43966, 44020), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['rgb'], {'rate': '(0.8)', 'training': 'is_training'}), '(rgb, rate=0.8, training=is_training)\n', (43983, 44020), True, 'import tensorflow as tf\n'), ((44029, 44073), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp', reuse=tf.AUTO_REUSE)\n", (44046, 44073), True, 'import tensorflow as tf\n'), ((44147, 44161), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp'], {}), '(sp)\n', (44157, 44161), True, 'import tensorflow as tf\n'), ((44186, 44241), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp'], {'training': 'is_training'}), '(sp, training=is_training)\n', (44215, 44241), True, 'import tensorflow as tf\n'), ((44269, 44322), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['sp'], {'rate': '(0.8)', 'training': 'is_training'}), '(sp, rate=0.8, training=is_training)\n', (44286, 44322), True, 'import tensorflow as tf\n'), ((44331, 44376), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp2', reuse=tf.AUTO_REUSE)\n", (44348, 44376), True, 'import 
tensorflow as tf\n'), ((44450, 44464), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp'], {}), '(sp)\n', (44460, 44464), True, 'import tensorflow as tf\n'), ((44489, 44544), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp'], {'training': 'is_training'}), '(sp, training=is_training)\n', (44518, 44544), True, 'import tensorflow as tf\n'), ((44554, 44614), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb_feature_exract"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb_feature_exract', reuse=tf.AUTO_REUSE)\n", (44571, 44614), True, 'import tensorflow as tf\n'), ((44719, 44772), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb_predict"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb_predict', reuse=tf.AUTO_REUSE)\n", (44736, 44772), True, 'import tensorflow as tf\n'), ((46049, 46110), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""image_guided_filter"""'], {'reuse': 'tf.AUTO_REUSE'}), "('image_guided_filter', reuse=tf.AUTO_REUSE)\n", (46066, 46110), True, 'import tensorflow as tf\n'), ((46250, 46309), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp_feature_exract"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp_feature_exract', reuse=tf.AUTO_REUSE)\n", (46267, 46309), True, 'import tensorflow as tf\n'), ((46421, 46473), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp_predict"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp_predict', reuse=tf.AUTO_REUSE)\n", (46438, 46473), True, 'import tensorflow as tf\n'), ((47693, 47749), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""feature_fusion"""'], {'reuse': 'tf.AUTO_REUSE'}), "('feature_fusion', reuse=tf.AUTO_REUSE)\n", (47710, 47749), True, 'import tensorflow as tf\n'), ((47796, 47841), 'tensorflow.concat', 'tf.concat', (['[rgb_feature[4], sp_feature[4]]', '(3)'], {}), '([rgb_feature[4], sp_feature[4]], 3)\n', (47805, 47841), True, 'import tensorflow as tf\n'), ((48136, 48191), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""depth_predict"""'], {'reuse': 
'tf.AUTO_REUSE'}), "('depth_predict', reuse=tf.AUTO_REUSE)\n", (48153, 48191), True, 'import tensorflow as tf\n'), ((49134, 49195), 'tensorflow.abs', 'tf.abs', (['((groundtruth2 - predict2) / (groundtruth2 * predict2))'], {}), '((groundtruth2 - predict2) / (groundtruth2 * predict2))\n', (49140, 49195), True, 'import tensorflow as tf\n'), ((50024, 50077), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (50041, 50077), True, 'import tensorflow as tf\n'), ((752, 785), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (783, 785), True, 'import tensorflow as tf\n'), ((804, 837), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (835, 837), True, 'import tensorflow as tf\n'), ((857, 894), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (889, 894), True, 'import tensorflow as tf\n'), ((912, 949), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (944, 949), True, 'import tensorflow as tf\n'), ((1298, 1331), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (1329, 1331), True, 'import tensorflow as tf\n'), ((1350, 1383), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (1381, 1383), True, 'import tensorflow as tf\n'), ((1403, 1440), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (1435, 1440), True, 'import tensorflow as tf\n'), ((1458, 1495), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (1490, 1495), True, 'import tensorflow as tf\n'), ((2212, 2245), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (2243, 2245), True, 
'import tensorflow as tf\n'), ((2264, 2297), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (2295, 2297), True, 'import tensorflow as tf\n'), ((2317, 2354), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (2349, 2354), True, 'import tensorflow as tf\n'), ((2372, 2409), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (2404, 2409), True, 'import tensorflow as tf\n'), ((2596, 2629), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (2627, 2629), True, 'import tensorflow as tf\n'), ((2648, 2681), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (2679, 2681), True, 'import tensorflow as tf\n'), ((2701, 2738), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (2733, 2738), True, 'import tensorflow as tf\n'), ((2756, 2793), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (2788, 2793), True, 'import tensorflow as tf\n'), ((2988, 3021), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (3019, 3021), True, 'import tensorflow as tf\n'), ((3040, 3073), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (3071, 3073), True, 'import tensorflow as tf\n'), ((3093, 3130), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (3125, 3130), True, 'import tensorflow as tf\n'), ((3148, 3185), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (3180, 3185), True, 'import tensorflow as tf\n'), ((3846, 3879), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (3877, 3879), 
True, 'import tensorflow as tf\n'), ((3898, 3931), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (3929, 3931), True, 'import tensorflow as tf\n'), ((3951, 3988), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (3983, 3988), True, 'import tensorflow as tf\n'), ((4006, 4043), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (4038, 4043), True, 'import tensorflow as tf\n'), ((4244, 4277), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (4275, 4277), True, 'import tensorflow as tf\n'), ((4296, 4329), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (4327, 4329), True, 'import tensorflow as tf\n'), ((4349, 4386), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (4381, 4386), True, 'import tensorflow as tf\n'), ((4404, 4441), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (4436, 4441), True, 'import tensorflow as tf\n'), ((4640, 4673), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (4671, 4673), True, 'import tensorflow as tf\n'), ((4692, 4725), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (4723, 4725), True, 'import tensorflow as tf\n'), ((4745, 4782), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (4777, 4782), True, 'import tensorflow as tf\n'), ((4800, 4837), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (4832, 4837), True, 'import tensorflow as tf\n'), ((12946, 12969), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""c1"""'], {}), "('c1')\n", (12963, 12969), True, 
'import tensorflow as tf\n'), ((13062, 13076), 'tensorflow.nn.relu', 'tf.nn.relu', (['c1'], {}), '(c1)\n', (13072, 13076), True, 'import tensorflow as tf\n'), ((13107, 13162), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c1'], {'training': 'is_training'}), '(c1, training=is_training)\n', (13136, 13162), True, 'import tensorflow as tf\n'), ((13296, 13319), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""c2"""'], {}), "('c2')\n", (13313, 13319), True, 'import tensorflow as tf\n'), ((13411, 13425), 'tensorflow.nn.relu', 'tf.nn.relu', (['c2'], {}), '(c2)\n', (13421, 13425), True, 'import tensorflow as tf\n'), ((13456, 13511), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c2'], {'training': 'is_training'}), '(c2, training=is_training)\n', (13485, 13511), True, 'import tensorflow as tf\n'), ((13734, 13757), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""c2"""'], {}), "('c2')\n", (13751, 13757), True, 'import tensorflow as tf\n'), ((13853, 13869), 'tensorflow.nn.relu', 'tf.nn.relu', (['c2_2'], {}), '(c2_2)\n', (13863, 13869), True, 'import tensorflow as tf\n'), ((13902, 13959), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c2_2'], {'training': 'is_training'}), '(c2_2, training=is_training)\n', (13931, 13959), True, 'import tensorflow as tf\n'), ((14212, 14236), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dc1"""'], {}), "('dc1')\n", (14229, 14236), True, 'import tensorflow as tf\n'), ((14332, 14346), 'tensorflow.nn.relu', 'tf.nn.relu', (['c3'], {}), '(c3)\n', (14342, 14346), True, 'import tensorflow as tf\n'), ((14377, 14432), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c3'], {'training': 'is_training'}), '(c3, training=is_training)\n', (14406, 14432), True, 'import tensorflow as tf\n'), ((14632, 14656), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dc2"""'], {}), "('dc2')\n", (14649, 14656), True, 'import 
tensorflow as tf\n'), ((14750, 14764), 'tensorflow.nn.relu', 'tf.nn.relu', (['c4'], {}), '(c4)\n', (14760, 14764), True, 'import tensorflow as tf\n'), ((14795, 14850), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c4'], {'training': 'is_training'}), '(c4, training=is_training)\n', (14824, 14850), True, 'import tensorflow as tf\n'), ((14991, 15015), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dc3"""'], {}), "('dc3')\n", (15008, 15015), True, 'import tensorflow as tf\n'), ((15109, 15123), 'tensorflow.nn.relu', 'tf.nn.relu', (['c4'], {}), '(c4)\n', (15119, 15123), True, 'import tensorflow as tf\n'), ((15154, 15209), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['c4'], {'training': 'is_training'}), '(c4, training=is_training)\n', (15183, 15209), True, 'import tensorflow as tf\n'), ((15412, 15449), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['c1', '(H, W)', '(0)'], {}), '(c1, (H, W), 0)\n', (15434, 15449), True, 'import tensorflow as tf\n'), ((15709, 15734), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""igf1"""'], {}), "('igf1')\n", (15726, 15734), True, 'import tensorflow as tf\n'), ((15919, 15944), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""igf2"""'], {}), "('igf2')\n", (15936, 15944), True, 'import tensorflow as tf\n'), ((16158, 16183), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""igf3"""'], {}), "('igf3')\n", (16175, 16183), True, 'import tensorflow as tf\n'), ((16395, 16420), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""igf4"""'], {}), "('igf4')\n", (16412, 16420), True, 'import tensorflow as tf\n'), ((16632, 16657), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""igf5"""'], {}), "('igf5')\n", (16649, 16657), True, 'import tensorflow as tf\n'), ((21878, 21923), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_1', reuse=tf.AUTO_REUSE)\n", (21895, 21923), True, 'import 
tensorflow as tf\n'), ((22028, 22042), 'tensorflow.nn.relu', 'tf.nn.relu', (['u1'], {}), '(u1)\n', (22038, 22042), True, 'import tensorflow as tf\n'), ((22166, 22221), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u1'], {'training': 'is_training'}), '(u1, training=is_training)\n', (22195, 22221), True, 'import tensorflow as tf\n'), ((22441, 22488), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_1_1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_1_1', reuse=tf.AUTO_REUSE)\n", (22458, 22488), True, 'import tensorflow as tf\n'), ((22643, 22659), 'tensorflow.nn.relu', 'tf.nn.relu', (['u1_1'], {}), '(u1_1)\n', (22653, 22659), True, 'import tensorflow as tf\n'), ((22791, 22848), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u1_1'], {'training': 'is_training'}), '(u1_1, training=is_training)\n', (22820, 22848), True, 'import tensorflow as tf\n'), ((23110, 23155), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_2', reuse=tf.AUTO_REUSE)\n", (23127, 23155), True, 'import tensorflow as tf\n'), ((23258, 23272), 'tensorflow.nn.relu', 'tf.nn.relu', (['u2'], {}), '(u2)\n', (23268, 23272), True, 'import tensorflow as tf\n'), ((23311, 23366), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u2'], {'training': 'is_training'}), '(u2, training=is_training)\n', (23340, 23366), True, 'import tensorflow as tf\n'), ((23697, 23744), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_2_2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_2_2', reuse=tf.AUTO_REUSE)\n", (23714, 23744), True, 'import tensorflow as tf\n'), ((23895, 23911), 'tensorflow.nn.relu', 'tf.nn.relu', (['u2_2'], {}), '(u2_2)\n', (23905, 23911), True, 'import tensorflow as tf\n'), ((23952, 24009), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u2_2'], {'training': 'is_training'}), '(u2_2, training=is_training)\n', (23981, 24009), True, 'import 
tensorflow as tf\n'), ((24362, 24407), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_3', reuse=tf.AUTO_REUSE)\n", (24379, 24407), True, 'import tensorflow as tf\n'), ((24509, 24523), 'tensorflow.nn.relu', 'tf.nn.relu', (['u3'], {}), '(u3)\n', (24519, 24523), True, 'import tensorflow as tf\n'), ((24562, 24617), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u3'], {'training': 'is_training'}), '(u3, training=is_training)\n', (24591, 24617), True, 'import tensorflow as tf\n'), ((24947, 24994), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_3_3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_3_3', reuse=tf.AUTO_REUSE)\n", (24964, 24994), True, 'import tensorflow as tf\n'), ((25144, 25160), 'tensorflow.nn.relu', 'tf.nn.relu', (['u3_3'], {}), '(u3_3)\n', (25154, 25160), True, 'import tensorflow as tf\n'), ((25201, 25258), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u3_3'], {'training': 'is_training'}), '(u3_3, training=is_training)\n', (25230, 25258), True, 'import tensorflow as tf\n'), ((25610, 25655), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_4"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_4', reuse=tf.AUTO_REUSE)\n", (25627, 25655), True, 'import tensorflow as tf\n'), ((25759, 25773), 'tensorflow.nn.relu', 'tf.nn.relu', (['u4'], {}), '(u4)\n', (25769, 25773), True, 'import tensorflow as tf\n'), ((25812, 25867), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u4'], {'training': 'is_training'}), '(u4, training=is_training)\n', (25841, 25867), True, 'import tensorflow as tf\n'), ((26199, 26246), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_4_4"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_4_4', reuse=tf.AUTO_REUSE)\n", (26216, 26246), True, 'import tensorflow as tf\n'), ((26396, 26412), 'tensorflow.nn.relu', 'tf.nn.relu', (['u4_4'], {}), '(u4_4)\n', (26406, 26412), True, 'import tensorflow as tf\n'), 
((26453, 26510), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u4_4'], {'training': 'is_training'}), '(u4_4, training=is_training)\n', (26482, 26510), True, 'import tensorflow as tf\n'), ((26771, 26813), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['u2_2', '(48, 156)', '(0)'], {}), '(u2_2, (48, 156), 0)\n', (26793, 26813), True, 'import tensorflow as tf\n'), ((26849, 26875), 'tensorflow.concat', 'tf.concat', (['[u4_4, u2_2]', '(3)'], {}), '([u4_4, u2_2], 3)\n', (26858, 26875), True, 'import tensorflow as tf\n'), ((27073, 27118), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_5"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_5', reuse=tf.AUTO_REUSE)\n", (27090, 27118), True, 'import tensorflow as tf\n'), ((27222, 27236), 'tensorflow.nn.relu', 'tf.nn.relu', (['u5'], {}), '(u5)\n', (27232, 27236), True, 'import tensorflow as tf\n'), ((27275, 27330), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u5'], {'training': 'is_training'}), '(u5, training=is_training)\n', (27304, 27330), True, 'import tensorflow as tf\n'), ((27668, 27715), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_5_5"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_5_5', reuse=tf.AUTO_REUSE)\n", (27685, 27715), True, 'import tensorflow as tf\n'), ((27865, 27881), 'tensorflow.nn.relu', 'tf.nn.relu', (['u5_5'], {}), '(u5_5)\n', (27875, 27881), True, 'import tensorflow as tf\n'), ((27922, 27979), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u5_5'], {'training': 'is_training'}), '(u5_5, training=is_training)\n', (27951, 27979), True, 'import tensorflow as tf\n'), ((28240, 28282), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['u1_1', '(96, 312)', '(0)'], {}), '(u1_1, (96, 312), 0)\n', (28262, 28282), True, 'import tensorflow as tf\n'), ((28318, 28344), 'tensorflow.concat', 'tf.concat', (['[u5_5, u1_1]', '(3)'], {}), '([u5_5, u1_1], 3)\n', (28327, 28344), True, 'import tensorflow as 
tf\n'), ((28508, 28553), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_6"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_6', reuse=tf.AUTO_REUSE)\n", (28525, 28553), True, 'import tensorflow as tf\n'), ((28656, 28670), 'tensorflow.nn.relu', 'tf.nn.relu', (['u6'], {}), '(u6)\n', (28666, 28670), True, 'import tensorflow as tf\n'), ((28709, 28764), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u6'], {'training': 'is_training'}), '(u6, training=is_training)\n', (28738, 28764), True, 'import tensorflow as tf\n'), ((29135, 29182), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_6_6"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_6_6', reuse=tf.AUTO_REUSE)\n", (29152, 29182), True, 'import tensorflow as tf\n'), ((29331, 29347), 'tensorflow.nn.relu', 'tf.nn.relu', (['u6_6'], {}), '(u6_6)\n', (29341, 29347), True, 'import tensorflow as tf\n'), ((29388, 29445), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u6_6'], {'training': 'is_training'}), '(u6_6, training=is_training)\n', (29417, 29445), True, 'import tensorflow as tf\n'), ((29830, 29875), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_7"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_7', reuse=tf.AUTO_REUSE)\n", (29847, 29875), True, 'import tensorflow as tf\n'), ((29978, 29992), 'tensorflow.nn.relu', 'tf.nn.relu', (['u7'], {}), '(u7)\n', (29988, 29992), True, 'import tensorflow as tf\n'), ((30031, 30086), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u7'], {'training': 'is_training'}), '(u7, training=is_training)\n', (30060, 30086), True, 'import tensorflow as tf\n'), ((30449, 30496), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""U_7_7"""'], {'reuse': 'tf.AUTO_REUSE'}), "('U_7_7', reuse=tf.AUTO_REUSE)\n", (30466, 30496), True, 'import tensorflow as tf\n'), ((30645, 30661), 'tensorflow.nn.relu', 'tf.nn.relu', (['u7_7'], {}), '(u7_7)\n', (30655, 30661), True, 'import tensorflow as tf\n'), ((30702, 30759), 
'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['u7_7'], {'training': 'is_training'}), '(u7_7, training=is_training)\n', (30731, 30759), True, 'import tensorflow as tf\n'), ((31136, 31187), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""regresion"""'], {'reuse': 'tf.AUTO_REUSE'}), "('regresion', reuse=tf.AUTO_REUSE)\n", (31153, 31187), True, 'import tensorflow as tf\n'), ((31288, 31302), 'tensorflow.nn.relu', 'tf.nn.relu', (['re'], {}), '(re)\n', (31298, 31302), True, 'import tensorflow as tf\n'), ((31339, 31394), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['re', '(self.IMG_H, self.IMG_W)', '(0)'], {}), '(re, (self.IMG_H, self.IMG_W), 0)\n', (31361, 31394), True, 'import tensorflow as tf\n'), ((33328, 33377), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv1', reuse=tf.AUTO_REUSE)\n", (33345, 33377), True, 'import tensorflow as tf\n'), ((33492, 33515), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_predict'], {}), '(rgb_predict)\n', (33502, 33515), True, 'import tensorflow as tf\n'), ((33553, 33617), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb_predict'], {'training': 'is_training'}), '(rgb_predict, training=is_training)\n', (33582, 33617), True, 'import tensorflow as tf\n'), ((33739, 33788), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv2', reuse=tf.AUTO_REUSE)\n", (33756, 33788), True, 'import tensorflow as tf\n'), ((33899, 33922), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_predict'], {}), '(rgb_predict)\n', (33909, 33922), True, 'import tensorflow as tf\n'), ((33960, 34024), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb_predict'], {'training': 'is_training'}), '(rgb_predict, training=is_training)\n', (33989, 34024), True, 'import tensorflow as tf\n'), ((34146, 34201), 'tensorflow.variable_scope', 'tf.variable_scope', 
(['"""rgb_regresion"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb_regresion', reuse=tf.AUTO_REUSE)\n", (34163, 34201), True, 'import tensorflow as tf\n'), ((34309, 34332), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_predict'], {}), '(rgb_predict)\n', (34319, 34332), True, 'import tensorflow as tf\n'), ((34370, 34434), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['rgb_predict', '(self.IMG_H, self.IMG_W)', '(0)'], {}), '(rgb_predict, (self.IMG_H, self.IMG_W), 0)\n', (34392, 34434), True, 'import tensorflow as tf\n'), ((35029, 35080), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""spdeconv1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('spdeconv1', reuse=tf.AUTO_REUSE)\n", (35046, 35080), True, 'import tensorflow as tf\n'), ((35192, 35214), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_predict'], {}), '(sp_predict)\n', (35202, 35214), True, 'import tensorflow as tf\n'), ((35251, 35314), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp_predict'], {'training': 'is_training'}), '(sp_predict, training=is_training)\n', (35280, 35314), True, 'import tensorflow as tf\n'), ((35437, 35486), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv2', reuse=tf.AUTO_REUSE)\n", (35454, 35486), True, 'import tensorflow as tf\n'), ((35594, 35616), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_predict'], {}), '(sp_predict)\n', (35604, 35616), True, 'import tensorflow as tf\n'), ((35653, 35716), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp_predict'], {'training': 'is_training'}), '(sp_predict, training=is_training)\n', (35682, 35716), True, 'import tensorflow as tf\n'), ((35839, 35893), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp_regresion"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp_regresion', reuse=tf.AUTO_REUSE)\n", (35856, 35893), True, 'import tensorflow as tf\n'), ((35998, 36020), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_predict'], {}), 
'(sp_predict)\n', (36008, 36020), True, 'import tensorflow as tf\n'), ((36057, 36120), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['sp_predict', '(self.IMG_H, self.IMG_W)', '(0)'], {}), '(sp_predict, (self.IMG_H, self.IMG_W), 0)\n', (36079, 36120), True, 'import tensorflow as tf\n'), ((36565, 36620), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusiondeconv1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusiondeconv1', reuse=tf.AUTO_REUSE)\n", (36582, 36620), True, 'import tensorflow as tf\n'), ((36717, 36735), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion'], {}), '(fusion)\n', (36727, 36735), True, 'import tensorflow as tf\n'), ((36768, 36827), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['fusion'], {'training': 'is_training'}), '(fusion, training=is_training)\n', (36797, 36827), True, 'import tensorflow as tf\n'), ((36976, 37031), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusiondeconv2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusiondeconv2', reuse=tf.AUTO_REUSE)\n", (36993, 37031), True, 'import tensorflow as tf\n'), ((37176, 37195), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion2'], {}), '(fusion2)\n', (37186, 37195), True, 'import tensorflow as tf\n'), ((37229, 37289), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['fusion2'], {'training': 'is_training'}), '(fusion2, training=is_training)\n', (37258, 37289), True, 'import tensorflow as tf\n'), ((37467, 37522), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusiondeconv3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusiondeconv3', reuse=tf.AUTO_REUSE)\n", (37484, 37522), True, 'import tensorflow as tf\n'), ((37621, 37640), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion3'], {}), '(fusion3)\n', (37631, 37640), True, 'import tensorflow as tf\n'), ((37674, 37734), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['fusion3'], {'training': 'is_training'}), '(fusion3, training=is_training)\n', (37703, 
37734), True, 'import tensorflow as tf\n'), ((37884, 37939), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusiondeconv4"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusiondeconv4', reuse=tf.AUTO_REUSE)\n", (37901, 37939), True, 'import tensorflow as tf\n'), ((38085, 38104), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion4'], {}), '(fusion4)\n', (38095, 38104), True, 'import tensorflow as tf\n'), ((38138, 38198), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['fusion4'], {'training': 'is_training'}), '(fusion4, training=is_training)\n', (38167, 38198), True, 'import tensorflow as tf\n'), ((38344, 38403), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusion_regresion1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusion_regresion1', reuse=tf.AUTO_REUSE)\n", (38361, 38403), True, 'import tensorflow as tf\n'), ((38500, 38519), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion5'], {}), '(fusion5)\n', (38510, 38519), True, 'import tensorflow as tf\n'), ((38667, 38726), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusion_regresion2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusion_regresion2', reuse=tf.AUTO_REUSE)\n", (38684, 38726), True, 'import tensorflow as tf\n'), ((38822, 38841), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion6'], {}), '(fusion6)\n', (38832, 38841), True, 'import tensorflow as tf\n'), ((38990, 39049), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fusion_regresion3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('fusion_regresion3', reuse=tf.AUTO_REUSE)\n", (39007, 39049), True, 'import tensorflow as tf\n'), ((39145, 39164), 'tensorflow.nn.relu', 'tf.nn.relu', (['fusion7'], {}), '(fusion7)\n', (39155, 39164), True, 'import tensorflow as tf\n'), ((39198, 39258), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['fusion7', '(self.IMG_H, self.IMG_W)', '(0)'], {}), '(fusion7, (self.IMG_H, self.IMG_W), 0)\n', (39220, 39258), True, 'import tensorflow as tf\n'), ((39568, 39613), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""rgb"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb', reuse=tf.AUTO_REUSE)\n", (39585, 39613), True, 'import tensorflow as tf\n'), ((39736, 39780), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp', reuse=tf.AUTO_REUSE)\n", (39753, 39780), True, 'import tensorflow as tf\n'), ((39951, 39997), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb2', reuse=tf.AUTO_REUSE)\n", (39968, 39997), True, 'import tensorflow as tf\n'), ((40048, 40093), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp2', reuse=tf.AUTO_REUSE)\n", (40065, 40093), True, 'import tensorflow as tf\n'), ((40250, 40296), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb3', reuse=tf.AUTO_REUSE)\n", (40267, 40296), True, 'import tensorflow as tf\n'), ((40347, 40392), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp3"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp3', reuse=tf.AUTO_REUSE)\n", (40364, 40392), True, 'import tensorflow as tf\n'), ((40547, 40593), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb4"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb4', reuse=tf.AUTO_REUSE)\n", (40564, 40593), True, 'import tensorflow as tf\n'), ((40645, 40690), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp4"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp4', reuse=tf.AUTO_REUSE)\n", (40662, 40690), True, 'import tensorflow as tf\n'), ((40848, 40894), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb5"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb5', reuse=tf.AUTO_REUSE)\n", (40865, 40894), True, 'import tensorflow as tf\n'), ((40946, 40991), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp5"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp5', reuse=tf.AUTO_REUSE)\n", (40963, 40991), True, 'import tensorflow as tf\n'), ((41149, 41195), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""rgb6"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb6', reuse=tf.AUTO_REUSE)\n", (41166, 41195), True, 'import tensorflow as tf\n'), ((41247, 41292), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp6"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp6', reuse=tf.AUTO_REUSE)\n", (41264, 41292), True, 'import tensorflow as tf\n'), ((41453, 41500), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb66"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb66', reuse=tf.AUTO_REUSE)\n", (41470, 41500), True, 'import tensorflow as tf\n'), ((41552, 41598), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp66"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp66', reuse=tf.AUTO_REUSE)\n", (41569, 41598), True, 'import tensorflow as tf\n'), ((41758, 41805), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb66"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb66', reuse=tf.AUTO_REUSE)\n", (41775, 41805), True, 'import tensorflow as tf\n'), ((41859, 41905), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp66"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp66', reuse=tf.AUTO_REUSE)\n", (41876, 41905), True, 'import tensorflow as tf\n'), ((42065, 42111), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb7"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb7', reuse=tf.AUTO_REUSE)\n", (42082, 42111), True, 'import tensorflow as tf\n'), ((42165, 42210), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp7"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp7', reuse=tf.AUTO_REUSE)\n", (42182, 42210), True, 'import tensorflow as tf\n'), ((42372, 42417), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb', reuse=tf.AUTO_REUSE)\n", (42389, 42417), True, 'import tensorflow as tf\n'), ((42470, 42514), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp', reuse=tf.AUTO_REUSE)\n", (42487, 42514), True, 'import tensorflow as tf\n'), ((42674, 42719), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""rgb"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb', reuse=tf.AUTO_REUSE)\n", (42691, 42719), True, 'import tensorflow as tf\n'), ((42772, 42816), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp', reuse=tf.AUTO_REUSE)\n", (42789, 42816), True, 'import tensorflow as tf\n'), ((44799, 44848), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv1', reuse=tf.AUTO_REUSE)\n", (44816, 44848), True, 'import tensorflow as tf\n'), ((44963, 44986), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_predict'], {}), '(rgb_predict)\n', (44973, 44986), True, 'import tensorflow as tf\n'), ((45024, 45088), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb_predict'], {'training': 'is_training'}), '(rgb_predict, training=is_training)\n', (45053, 45088), True, 'import tensorflow as tf\n'), ((45210, 45259), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv2', reuse=tf.AUTO_REUSE)\n", (45227, 45259), True, 'import tensorflow as tf\n'), ((45370, 45393), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_predict'], {}), '(rgb_predict)\n', (45380, 45393), True, 'import tensorflow as tf\n'), ((45431, 45495), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['rgb_predict'], {'training': 'is_training'}), '(rgb_predict, training=is_training)\n', (45460, 45495), True, 'import tensorflow as tf\n'), ((45617, 45672), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rgb_regresion"""'], {'reuse': 'tf.AUTO_REUSE'}), "('rgb_regresion', reuse=tf.AUTO_REUSE)\n", (45634, 45672), True, 'import tensorflow as tf\n'), ((45780, 45803), 'tensorflow.nn.relu', 'tf.nn.relu', (['rgb_predict'], {}), '(rgb_predict)\n', (45790, 45803), True, 'import tensorflow as tf\n'), ((45841, 45905), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['rgb_predict', '(self.IMG_H, 
self.IMG_W)', '(0)'], {}), '(rgb_predict, (self.IMG_H, self.IMG_W), 0)\n', (45863, 45905), True, 'import tensorflow as tf\n'), ((46500, 46551), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""spdeconv1"""'], {'reuse': 'tf.AUTO_REUSE'}), "('spdeconv1', reuse=tf.AUTO_REUSE)\n", (46517, 46551), True, 'import tensorflow as tf\n'), ((46663, 46685), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_predict'], {}), '(sp_predict)\n', (46673, 46685), True, 'import tensorflow as tf\n'), ((46722, 46785), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp_predict'], {'training': 'is_training'}), '(sp_predict, training=is_training)\n', (46751, 46785), True, 'import tensorflow as tf\n'), ((46908, 46957), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv2"""'], {'reuse': 'tf.AUTO_REUSE'}), "('deconv2', reuse=tf.AUTO_REUSE)\n", (46925, 46957), True, 'import tensorflow as tf\n'), ((47065, 47087), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_predict'], {}), '(sp_predict)\n', (47075, 47087), True, 'import tensorflow as tf\n'), ((47124, 47187), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['sp_predict'], {'training': 'is_training'}), '(sp_predict, training=is_training)\n', (47153, 47187), True, 'import tensorflow as tf\n'), ((47310, 47364), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sp_regresion"""'], {'reuse': 'tf.AUTO_REUSE'}), "('sp_regresion', reuse=tf.AUTO_REUSE)\n", (47327, 47364), True, 'import tensorflow as tf\n'), ((47469, 47491), 'tensorflow.nn.relu', 'tf.nn.relu', (['sp_predict'], {}), '(sp_predict)\n', (47479, 47491), True, 'import tensorflow as tf\n'), ((47528, 47591), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['sp_predict', '(self.IMG_H, self.IMG_W)', '(0)'], {}), '(sp_predict, (self.IMG_H, self.IMG_W), 0)\n', (47550, 47591), True, 'import tensorflow as tf\n'), ((48654, 48687), 'tensorflow.subtract', 'tf.subtract', (['predict', 'groundtruth'], {}), '(predict, 
groundtruth)\n', (48665, 48687), True, 'import tensorflow as tf\n'), ((49892, 49925), 'tensorflow.subtract', 'tf.subtract', (['predict', 'groundtruth'], {}), '(predict, groundtruth)\n', (49903, 49925), True, 'import tensorflow as tf\n'), ((50212, 50254), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (50229, 50254), True, 'import tensorflow as tf\n'), ((1828, 1861), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (1859, 1861), True, 'import tensorflow as tf\n'), ((1880, 1913), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (1911, 1913), True, 'import tensorflow as tf\n'), ((1933, 1970), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (1965, 1970), True, 'import tensorflow as tf\n'), ((1988, 2025), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (2020, 2025), True, 'import tensorflow as tf\n'), ((3448, 3481), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (3479, 3481), True, 'import tensorflow as tf\n'), ((3500, 3533), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (3531, 3533), True, 'import tensorflow as tf\n'), ((3553, 3590), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (3585, 3590), True, 'import tensorflow as tf\n'), ((3608, 3645), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.1)'], {}), '(0.1)\n', (3640, 3645), True, 'import tensorflow as tf\n'), ((7729, 7761), 'tensorflow.reshape', 'tf.reshape', (['pool', '(shape[0], -1)'], {}), '(pool, (shape[0], -1))\n', (7739, 7761), True, 'import tensorflow as tf\n'), ((7795, 7827), 'tensorflow.reshape', 'tf.reshape', (['pool', '(shape[0], 
-1)'], {}), '(pool, (shape[0], -1))\n', (7805, 7827), True, 'import tensorflow as tf\n'), ((7855, 7891), 'tensorflow.concat', 'tf.concat', (['(x_flatten, pool)'], {'axis': '(1)'}), '((x_flatten, pool), axis=1)\n', (7864, 7891), True, 'import tensorflow as tf\n'), ((17957, 17983), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp1"""'], {}), "('temp1')\n", (17974, 17983), True, 'import tensorflow as tf\n'), ((18023, 18058), 'tensorflow.concat', 'tf.concat', (['[p1, image_guided[0]]', '(3)'], {}), '([p1, image_guided[0]], 3)\n', (18032, 18058), True, 'import tensorflow as tf\n'), ((18674, 18700), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp2"""'], {}), "('temp2')\n", (18691, 18700), True, 'import tensorflow as tf\n'), ((18740, 18775), 'tensorflow.concat', 'tf.concat', (['[p2, image_guided[1]]', '(3)'], {}), '([p2, image_guided[1]], 3)\n', (18749, 18775), True, 'import tensorflow as tf\n'), ((19391, 19417), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp3"""'], {}), "('temp3')\n", (19408, 19417), True, 'import tensorflow as tf\n'), ((19457, 19492), 'tensorflow.concat', 'tf.concat', (['[p3, image_guided[2]]', '(3)'], {}), '([p3, image_guided[2]], 3)\n', (19466, 19492), True, 'import tensorflow as tf\n'), ((20108, 20134), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp4"""'], {}), "('temp4')\n", (20125, 20134), True, 'import tensorflow as tf\n'), ((20174, 20209), 'tensorflow.concat', 'tf.concat', (['[p4, image_guided[3]]', '(3)'], {}), '([p4, image_guided[3]], 3)\n', (20183, 20209), True, 'import tensorflow as tf\n'), ((21119, 21145), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp5"""'], {}), "('temp5')\n", (21136, 21145), True, 'import tensorflow as tf\n'), ((21185, 21220), 'tensorflow.concat', 'tf.concat', (['[p5, image_guided[3]]', '(3)'], {}), '([p5, image_guided[3]], 3)\n', (21194, 21220), True, 'import tensorflow as tf\n'), ((48512, 48551), 'tensorflow.subtract', 'tf.subtract', (['predictfusion', 
'groundtruth'], {}), '(predictfusion, groundtruth)\n', (48523, 48551), True, 'import tensorflow as tf\n'), ((49304, 49337), 'tensorflow.subtract', 'tf.subtract', (['predict', 'groundtruth'], {}), '(predict, groundtruth)\n', (49315, 49337), True, 'import tensorflow as tf\n'), ((50273, 50310), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (50295, 50310), True, 'import tensorflow as tf\n'), ((48393, 48429), 'tensorflow.subtract', 'tf.subtract', (['predictrgb', 'groundtruth'], {}), '(predictrgb, groundtruth)\n', (48404, 48429), True, 'import tensorflow as tf\n'), ((48453, 48488), 'tensorflow.subtract', 'tf.subtract', (['predictsp', 'groundtruth'], {}), '(predictsp, groundtruth)\n', (48464, 48488), True, 'import tensorflow as tf\n'), ((49634, 49669), 'tensorflow.subtract', 'tf.subtract', (['groundtruth2', 'predict2'], {}), '(groundtruth2, predict2)\n', (49645, 49669), True, 'import tensorflow as tf\n'), ((49669, 49704), 'tensorflow.multiply', 'tf.multiply', (['predict2', 'groundtruth2'], {}), '(predict2, groundtruth2)\n', (49680, 49704), True, 'import tensorflow as tf\n'), ((7326, 7351), 'numpy.ceil', 'np.ceil', (['(shape[1] / l + 1)'], {}), '(shape[1] / l + 1)\n', (7333, 7351), True, 'import numpy as np\n'), ((7369, 7394), 'numpy.ceil', 'np.ceil', (['(shape[2] / l + 1)'], {}), '(shape[2] / l + 1)\n', (7376, 7394), True, 'import numpy as np\n'), ((7435, 7461), 'numpy.floor', 'np.floor', (['(shape[1] / l + 1)'], {}), '(shape[1] / l + 1)\n', (7443, 7461), True, 'import numpy as np\n'), ((7480, 7506), 'numpy.floor', 'np.floor', (['(shape[2] / l + 1)'], {}), '(shape[2] / l + 1)\n', (7488, 7506), True, 'import numpy as np\n')] |
## HAND discretised and visualised
## <NAME>
import rasterio as rio
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import argparse
def argparser():
    """Parse command-line options; abort with a usage error when one is missing.

    Returns the parsed ``argparse.Namespace`` with attributes
    ``dd``, ``cmap``, ``bins`` and ``output``.
    """
    parser = argparse.ArgumentParser()
    # (short flag, long flag, value type, message shown when the option is absent)
    option_specs = (
        ("-d", "--dd", str, '-d --dd Distance down raster'),
        ("-c", "--cmap", str, '-c --cmap Name of Matplotlib colormap'),
        ("-b", "--bins", int, '-b --bins Number of bins for discretization'),
        ("-o", "--output", str, '-o --output Output PNG of distance down'),
    )
    for short_flag, long_flag, value_type, _ in option_specs:
        parser.add_argument(short_flag, long_flag, type=value_type, help="")
    args = parser.parse_args()
    # Every option is mandatory; report the first one that is missing.
    for _, long_flag, _, missing_msg in option_specs:
        if not getattr(args, long_flag.lstrip('-')):
            parser.error(missing_msg)
    return args
def main():
    """Discretise a HAND (height above nearest drainage) raster into
    quantile bins and save a colour-mapped visualisation as a PNG.
    """
    options = argparser()
    # Open the distance-down raster and mask out nodata cells.
    # NOTE(review): basindd.read() is called twice, so the raster is read
    # twice; the second call only builds the nodata mask.
    basindd = rio.open(options.dd)
    basinddma = ma.array(basindd.read(),mask=(basindd.read()==basindd.nodata))
    #basinddma.fill_value = -1.
    # Sorted 1-D array of the valid values; zeros are deliberately treated
    # as nodata (see the "<=" comment further down).
    basinddmasort = np.sort(ma.array(basinddma.data,mask=((basinddma.mask)|(basinddma.data==0.))).compressed())
    bins = options.bins
    # Quantile bin edges: edge i is the value at the i/bins quantile of the
    # sorted data, with edge 0 pinned to the minimum.
    basinddmasortbins = np.zeros(bins+1)
    basinddmasortbins[0] = basinddmasort.min()
    for i in range(1,bins+1):
        basinddmasortbins[i] = basinddmasort[round(len(basinddmasort)/float(bins)*float(i))-1]
    ##  basinddmasort[round(len(basinddmasort)/7.*7)-1]==basinddmasort.max()==True
    #plt.hist(basinddmasort,bins=basinddmasortbins)
    #plt.show()
    ## "<=" in the first condition below
    ## because we have intentionally excluded 0s as nodata values
    # Mean of the data falling inside each bin; used below to space the
    # discrete colours proportionally to the bin means.
    basinddmasortbinsmean = np.zeros(bins)
    basinddmasortbinsmean[0] = basinddmasort[(basinddmasortbins[0]<=basinddmasort)&(basinddmasort<=basinddmasortbins[1])].mean()
    for i in range(1,bins):
        basinddmasortbinsmean[i] = basinddmasort[(basinddmasortbins[i]<basinddmasort)&(basinddmasort<=basinddmasortbins[i+1])].mean()
    # Map every raster cell to its bin number.
    basinddmadigi = np.digitize(basinddma,basinddmasortbins,right=True)
    # Build a discrete colormap: interior colours are sampled from the
    # continuous map in proportion to the cumulative spacing of bin means.
    cmap = mpl.cm.get_cmap(options.cmap)
    cmapdiscretear = cmap(np.linspace(0,1,bins))
    for i in range(1,bins-1):
        cmapdiscretear[i] = np.append(cmap.colors[int(round(float(len(cmap.colors))/(basinddmasortbinsmean[bins-1]-basinddmasortbinsmean[0])*np.cumsum(np.diff(basinddmasortbinsmean))[i-1]))-1],1.)
    cmapdiscrete = mpl.colors.ListedColormap(cmapdiscretear)
    fig,ax = plt.subplots()
    cax = ax.imshow(basinddmadigi.squeeze(),interpolation='none',cmap=cmapdiscrete)
    ax.set_title('Vertical distance down to nearest drainage')
    cbar = fig.colorbar(cax)
    # Label the colorbar ticks with the bin edges in metres.
    basinddmasortbinsstr = ['{:.2f}'.format(x) for x in basinddmasortbins.tolist()]
    cbar.ax.set_yticklabels([s + ' m' for s in basinddmasortbinsstr])
    plt.savefig(options.output)
    # NOTE(review): plt.show() is a no-op under the 'Agg' backend selected
    # at import time; presumably kept for interactive use.
    plt.show()
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"numpy.ma.array",
"matplotlib.use",
"rasterio.open",
"numpy.digitize",
"numpy.diff",
"matplotlib.colors.ListedColormap",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.show"
] | [((94, 108), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (101, 108), True, 'import matplotlib as mpl\n'), ((230, 255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (253, 255), False, 'import argparse\n'), ((933, 953), 'rasterio.open', 'rio.open', (['options.dd'], {}), '(options.dd)\n', (941, 953), True, 'import rasterio as rio\n'), ((1225, 1243), 'numpy.zeros', 'np.zeros', (['(bins + 1)'], {}), '(bins + 1)\n', (1233, 1243), True, 'import numpy as np\n'), ((1700, 1714), 'numpy.zeros', 'np.zeros', (['bins'], {}), '(bins)\n', (1708, 1714), True, 'import numpy as np\n'), ((2026, 2079), 'numpy.digitize', 'np.digitize', (['basinddma', 'basinddmasortbins'], {'right': '(True)'}), '(basinddma, basinddmasortbins, right=True)\n', (2037, 2079), True, 'import numpy as np\n'), ((2089, 2118), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['options.cmap'], {}), '(options.cmap)\n', (2104, 2118), True, 'import matplotlib as mpl\n'), ((2414, 2455), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['cmapdiscretear'], {}), '(cmapdiscretear)\n', (2439, 2455), True, 'import matplotlib as mpl\n'), ((2469, 2483), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2481, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2845), 'matplotlib.pyplot.savefig', 'plt.savefig', (['options.output'], {}), '(options.output)\n', (2829, 2845), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2860), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2858, 2860), True, 'import matplotlib.pyplot as plt\n'), ((2145, 2168), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'bins'], {}), '(0, 1, bins)\n', (2156, 2168), True, 'import numpy as np\n'), ((1093, 1164), 'numpy.ma.array', 'ma.array', (['basinddma.data'], {'mask': '(basinddma.mask | (basinddma.data == 0.0))'}), '(basinddma.data, mask=basinddma.mask | (basinddma.data == 0.0))\n', (1101, 1164), True, 'import numpy.ma as ma\n'), ((2349, 2379), 'numpy.diff', 
'np.diff', (['basinddmasortbinsmean'], {}), '(basinddmasortbinsmean)\n', (2356, 2379), True, 'import numpy as np\n')] |
import numpy as np
class GridMove:
    """Navigate between cell values of a 2-D grid of unique indices.

    The grid maps (row, column) positions to index values; a move that
    would step outside the grid leaves the caller at the current index.
    """

    # Row/column offsets per action name; long ("UP") and one-letter ("U")
    # spellings are both accepted, as in the original dispatch table.
    _OFFSETS = {
        "UP": (-1, 0), "U": (-1, 0),
        "RIGHT": (0, 1), "R": (0, 1),
        "DOWN": (1, 0), "D": (1, 0),
        "LEFT": (0, -1), "L": (0, -1),
    }

    def __init__(self, grid):
        # grid: 2-D array-like of unique index values (assumed unique --
        # get_position returns only the first match).
        self._Grid = grid
        self._height = grid.shape[0]
        self._width = grid.shape[1]

    def _in_bound(self, x, y):
        """Return True when (x, y) lies inside the grid."""
        return 0 <= x < self._height and 0 <= y < self._width

    def get_next(self, action_name, index):
        """Return the index reached from `index` by applying `action_name`.

        action_name: "UP"/"RIGHT"/"DOWN"/"LEFT" or "U"/"R"/"D"/"L".
        Out-of-bound moves return `index` unchanged.
        Raises ValueError for an unrecognized action (previously this
        crashed with an opaque TypeError on a None lookup).
        """
        try:
            dx, dy = self._OFFSETS[action_name]
        except KeyError:
            raise ValueError("Unknown action: %r" % (action_name,))
        x, y = self.get_position(index)
        nx, ny = x + dx, y + dy
        return self._Grid[nx, ny] if self._in_bound(nx, ny) else index

    def get_position(self, index):
        """Return the (row, column) of the first cell whose value equals `index`."""
        result_index = np.where(self._Grid == index)
        return result_index[0][0], result_index[1][0]
| [
"numpy.where"
] | [((1183, 1212), 'numpy.where', 'np.where', (['(self._Grid == index)'], {}), '(self._Grid == index)\n', (1191, 1212), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from ..analyzer import BaseAnalyzer
from ...expr.arithmetic import *
from ...expr.math import *
from ...expr.datetimes import *
from ...expr.strings import *
from ...expr.strings import Count as StrCount
from ...expr.element import *
from ...expr.reduction import *
from ...expr.collections import *
from ...expr.merge import *
from ...utils import output
from ..errors import CompileError
from ..utils import refresh_dynamic
from ... import types
from .... import compat
from ....utils import to_text
class Analyzer(BaseAnalyzer):
    """Rewrites DataFrame expressions that ODPS SQL cannot express directly
    into equivalent combinations of supported expressions (or UDF-backed
    ``MappedExpr`` nodes) before compilation.  Each ``visit_*`` method either
    registers a substitution via ``self._sub`` and returns, or raises
    ``NotImplementedError`` when the expression needs no rewrite here.
    """

    def _parents(self, expr):
        return self._dag.successors(expr)

    def visit_element_op(self, expr):
        # ``between`` expands into two comparisons joined by ``&``.
        if isinstance(expr, Between):
            if expr.inclusive:
                sub = ((expr.left <= expr.input) & (expr.input.copy() <= expr.right))
            else:
                sub = ((expr.left < expr.input) & (expr.input.copy() < expr.right))
            self._sub(expr, sub.rename(expr.name))
        elif isinstance(expr, Cut):
            sub = self._get_cut_sub_expr(expr)
            self._sub(expr, sub)
        else:
            raise NotImplementedError

    def visit_sample(self, expr):
        """Rewrite ``sample`` into a filter over the SAMPLE() UDF."""
        if expr._parts is None:
            raise CompileError('ODPS SQL only support sampling by specifying `parts` arg')

        idxes = [None, ] if expr._i is None else expr._i

        condition = None
        for idx in idxes:
            inputs = [expr._parts]
            if idx is not None:
                new_val = idx.value + 1
                inputs.append(Scalar(_value=new_val, _value_type=idx.value_type))
            if expr._sampled_fields:
                inputs.extend(expr._sampled_fields)
            cond = MappedExpr(_inputs=inputs, _func='SAMPLE', _data_type=types.boolean)
            if condition is None:
                condition = cond
            else:
                condition |= cond
        sub = FilterCollectionExpr(_input=expr.input, _predicate=condition,
                                   _schema=expr.schema)
        expr.input.optimize_banned = True

        self._sub(expr, sub)

    def _visit_pivot(self, expr):
        sub = self._get_pivot_sub_expr(expr)
        self._sub(expr, sub)

    def _visit_pivot_table(self, expr):
        sub = self._get_pivot_table_sub_expr(expr)
        self._sub(expr, sub)

    def visit_pivot(self, expr):
        if isinstance(expr, PivotCollectionExpr):
            self._visit_pivot(expr)
        else:
            self._visit_pivot_table(expr)

    def visit_extract_kv(self, expr):
        """Rewrite extract_kv: first aggregate the set of keys, then map each
        row to one column per discovered key (delayed via ``_deps`` callback).
        """
        kv_delimiter = expr._kv_delimiter.value
        item_delimiter = expr._item_delimiter.value
        default = expr._default.value if expr._default else None

        class KeyAgg(object):
            def buffer(self):
                return set()

            def __call__(self, buf, val):
                if not val:
                    return

                def validate_kv(v):
                    parts = v.split(kv_delimiter)
                    if len(parts) != 2:
                        raise ValueError('Malformed KV pair: %s' % v)
                    return parts[0]

                buf.update([validate_kv(item) for item in val.split(item_delimiter)])

            def merge(self, buf, pbuffer):
                buf.update(pbuffer)

            def getvalue(self, buf):
                return item_delimiter.join(sorted(buf))

        columns_expr = expr.input.exclude(expr._intact).apply(KeyAgg, names=[c.name for c in expr._columns])

        intact_names = [g.name for g in expr._intact]
        intact_types = [g.dtype for g in expr._intact]
        exprs = [expr]

        def callback(result, new_expr):
            expr = exprs[0]
            names = list(intact_names)
            tps = list(intact_types)
            kv_slot_map = dict()

            for col, key_str in compat.izip(result.columns, result[0]):
                kv_slot_map[col.name] = dict()
                for k in key_str.split(item_delimiter):
                    names.append('%s_%s' % (col.name, k))
                    tps.append(expr._column_type)
                    kv_slot_map[col.name][k] = len(names) - 1
            kv_slot_names = list(kv_slot_map.keys())

            type_adapter = None
            if isinstance(expr._column_type, types.Float):
                type_adapter = float
            elif isinstance(expr._column_type, types.Integer):
                type_adapter = int

            @output(names, tps)
            def mapper(row):
                ret = [default, ] * len(names)
                ret[:len(intact_names)] = [getattr(row, col) for col in intact_names]
                for col in kv_slot_names:
                    kv_val = getattr(row, col)
                    if not kv_val:
                        continue
                    for kv_item in kv_val.split(item_delimiter):
                        k, v = kv_item.split(kv_delimiter)
                        if type_adapter:
                            v = type_adapter(v)
                        ret[kv_slot_map[col][k]] = v
                return tuple(ret)

            new_expr._schema = Schema.from_lists(names, tps)

            extracted = expr.input.map_reduce(mapper)
            self._sub(new_expr, extracted)

            # trigger refresh of dynamic operations
            refresh_dynamic(extracted, self._dag)

        sub = CollectionExpr(_schema=DynamicSchema.from_lists(intact_names, intact_types),
                             _deps=[(columns_expr, callback)])
        self._sub(expr, sub)

    def visit_value_counts(self, expr):
        self._sub(expr, self._get_value_counts_sub_expr(expr))

    def _gen_mapped_expr(self, expr, inputs, func, name,
                         args=None, kwargs=None, multiple=False):
        """Build a MappedExpr (UDF call) matching expr's sequence/scalar kind."""
        kwargs = dict(_inputs=inputs, _func=func, _name=name,
                      _func_args=args, _func_kwargs=kwargs,
                      _multiple=multiple)
        if isinstance(expr, SequenceExpr):
            kwargs['_data_type'] = expr.dtype
        else:
            kwargs['_value_type'] = expr.dtype
        return MappedExpr(**kwargs)

    def visit_binary_op(self, expr):
        if not options.df.analyze:
            raise NotImplementedError

        if isinstance(expr, FloorDivide):
            func = lambda l, r: l // r
            # multiple False will pass *args instead of namedtuple
            sub = self._gen_mapped_expr(expr, (expr.lhs, expr.rhs),
                                        func, expr.name, multiple=False)
            self._sub(expr, sub)
            return
        if isinstance(expr, Mod):
            func = lambda l, r: l % r
            sub = self._gen_mapped_expr(expr, (expr.lhs, expr.rhs),
                                        func, expr.name, multiple=False)
            self._sub(expr, sub)
            return

        if isinstance(expr, Add) and \
                all(child.dtype == types.datetime for child in (expr.lhs, expr.rhs)):
            return
        elif isinstance(expr, (Add, Substract)):
            if expr.lhs.dtype == types.datetime and expr.rhs.dtype == types.datetime:
                pass
            elif any(isinstance(child, MilliSecondScalar) for child in (expr.lhs, expr.rhs)):
                pass
            else:
                return

            if sys.version_info[:2] <= (2, 6):
                def total_seconds(self):
                    return self.days * 86400.0 + self.seconds + self.microseconds * 1.0e-6
            else:
                from datetime import timedelta

                def total_seconds(self):
                    return self.total_seconds()

            def func(l, r, method):
                from datetime import datetime, timedelta
                if not isinstance(l, datetime):
                    l = timedelta(milliseconds=l)
                if not isinstance(r, datetime):
                    r = timedelta(milliseconds=r)
                if method == '+':
                    res = l + r
                else:
                    res = l - r
                if isinstance(res, timedelta):
                    return int(total_seconds(res) * 1000)
                return res

            inputs = expr.lhs, expr.rhs, Scalar('+') if isinstance(expr, Add) else Scalar('-')
            sub = self._gen_mapped_expr(expr, inputs, func, expr.name, multiple=False)
            self._sub(expr, sub)
            # FIX: return after a successful substitution so the trailing
            # NotImplementedError only fires for unhandled operators, matching
            # the FloorDivide/Mod branches above.
            return

        raise NotImplementedError

    def visit_unary_op(self, expr):
        if not options.df.analyze:
            raise NotImplementedError

        if isinstance(expr, Invert) and isinstance(expr.input.dtype, types.Integer):
            sub = expr.input.map(lambda x: ~x)
            self._sub(expr, sub)
            return

        raise NotImplementedError

    def visit_math(self, expr):
        """Rewrite math functions ODPS lacks into numpy-backed UDF maps."""
        if not options.df.analyze:
            raise NotImplementedError

        if expr.dtype != types.decimal:
            if isinstance(expr, Arccosh):
                def func(x):
                    import numpy as np
                    return float(np.arccosh(x))
            elif isinstance(expr, Arcsinh):
                def func(x):
                    import numpy as np
                    return float(np.arcsinh(x))
            elif isinstance(expr, Arctanh):
                def func(x):
                    import numpy as np
                    return float(np.arctanh(x))
            elif isinstance(expr, Radians):
                def func(x):
                    import numpy as np
                    return float(np.radians(x))
            elif isinstance(expr, Degrees):
                def func(x):
                    import numpy as np
                    return float(np.degrees(x))
            else:
                raise NotImplementedError

            sub = expr.input.map(func, expr.dtype)
            self._sub(expr, sub)
            return

        raise NotImplementedError

    def visit_datetime_op(self, expr):
        if isinstance(expr, Strftime):
            if not options.df.analyze:
                raise NotImplementedError

            date_format = expr.date_format

            def func(x):
                return x.strftime(date_format)

            sub = expr.input.map(func, expr.dtype)
            self._sub(expr, sub)
            return

        raise NotImplementedError

    def visit_string_op(self, expr):
        """Rewrite string operations into expression combinations or UDF maps."""
        # Branches below that can be expressed with pure expressions
        # (Ljust/Rjust/Zfill/CatStr) do not need options.df.analyze.
        if isinstance(expr, Ljust):
            rest = expr.width - expr.input.len()
            sub = expr.input + \
                  (rest >= 0).ifelse(expr._fillchar.repeat(rest), '')
            self._sub(expr, sub.rename(expr.name))
            return
        elif isinstance(expr, Rjust):
            rest = expr.width - expr.input.len()
            sub = (rest >= 0).ifelse(expr._fillchar.repeat(rest), '') + expr.input
            self._sub(expr, sub.rename(expr.name))
            return
        elif isinstance(expr, Zfill):
            fillchar = Scalar('0')
            rest = expr.width - expr.input.len()
            sub = (rest >= 0).ifelse(fillchar.repeat(rest), '') + expr.input
            self._sub(expr, sub.rename(expr.name))
            return
        elif isinstance(expr, CatStr):
            input = expr.input
            others = expr._others if isinstance(expr._others, Iterable) else (expr._others, )
            for other in others:
                if expr.na_rep is not None:
                    for e in (input, ) + tuple(others):
                        self._sub(e, e.fillna(expr.na_rep), parents=(expr, ))
                    return
                else:
                    if expr._sep is not None:
                        input = other.isnull().ifelse(input, input + expr._sep + other)
                    else:
                        input = other.isnull().ifelse(input, input + other)
            self._sub(expr, input.rename(expr.name))
            return

        if not options.df.analyze:
            raise NotImplementedError

        func = None
        if isinstance(expr, Contains) and expr.regex:
            def func(x, pat, case, flags):
                if x is None:
                    return False

                flgs = 0
                if not case:
                    flgs = re.I
                if flags > 0:
                    flgs = flgs | flags
                r = re.compile(pat, flgs)
                return r.search(x) is not None

            pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
                else Scalar(re.escape(to_text(expr.pat)))
            inputs = expr.input, pat, expr._case, expr._flags
            sub = self._gen_mapped_expr(expr, inputs, func,
                                        expr.name, multiple=False)
            self._sub(expr, sub)
            return
        elif isinstance(expr, StrCount):
            def func(x, pat, flags):
                regex = re.compile(pat, flags=flags)
                return len(regex.findall(x))

            pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
                else Scalar(re.escape(to_text(expr.pat)))
            inputs = expr.input, pat, expr._flags
            sub = self._gen_mapped_expr(expr, inputs, func,
                                        expr.name, multiple=False)
            self._sub(expr, sub)
            return
        elif isinstance(expr, Find) and expr.end is not None:
            start = expr.start
            end = expr.end
            substr = expr.sub

            def func(x):
                return x.find(substr, start, end)
        elif isinstance(expr, RFind):
            start = expr.start
            end = expr.end
            substr = expr.sub

            def func(x):
                return x.rfind(substr, start, end)
        elif isinstance(expr, Extract):
            def func(x, pat, flags, group):
                regex = re.compile(pat, flags=flags)
                m = regex.search(x)
                if m:
                    if group is None:
                        return m.group()
                    return m.group(group)

            pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
                else Scalar(re.escape(to_text(expr.pat)))
            inputs = expr.input, pat, expr._flags, expr._group
            sub = self._gen_mapped_expr(expr, inputs, func,
                                        expr.name, multiple=False)
            self._sub(expr, sub)
            return
        elif isinstance(expr, Replace):
            use_regex = [expr.regex]

            def func(x, pat, repl, n, case, flags):
                use_re = use_regex[0] and (not case or len(pat) > 1 or flags)

                if use_re:
                    if not case:
                        flags |= re.IGNORECASE
                    regex = re.compile(pat, flags=flags)
                    n = n if n >= 0 else 0

                    return regex.sub(repl, x, count=n)
                else:
                    return x.replace(pat, repl, n)

            # FIX: test expr._pat._value (as the Contains/StrCount/Extract
            # branches above do), not expr._value -- the Replace node itself
            # carries no scalar value.
            pat = expr._pat if not isinstance(expr._pat, StringScalar) or expr._pat._value is None \
                else Scalar(re.escape(to_text(expr.pat)))
            inputs = expr.input, pat, expr._repl, expr._n, \
                expr._case, expr._flags
            sub = self._gen_mapped_expr(expr, inputs, func,
                                        expr.name, multiple=False)
            self._sub(expr, sub)
            return
        elif isinstance(expr, (Lstrip, Strip, Rstrip)) and expr.to_strip != ' ':
            to_strip = expr.to_strip

            if isinstance(expr, Lstrip):
                def func(x):
                    return x.lstrip(to_strip)
            elif isinstance(expr, Strip):
                def func(x):
                    return x.strip(to_strip)
            elif isinstance(expr, Rstrip):
                def func(x):
                    return x.rstrip(to_strip)
        elif isinstance(expr, Pad):
            side = expr.side
            fillchar = expr.fillchar
            width = expr.width

            if side == 'left':
                func = lambda x: x.rjust(width, fillchar)
            elif side == 'right':
                func = lambda x: x.ljust(width, fillchar)
            elif side == 'both':
                func = lambda x: x.center(width, fillchar)
            else:
                raise NotImplementedError
        elif isinstance(expr, Slice):
            start, end, step = expr.start, expr.end, expr.step

            if end is None and step is None:
                raise NotImplementedError
            if isinstance(start, six.integer_types) and \
                    isinstance(end, six.integer_types) and step is None:
                if start >= 0 and end >= 0:
                    raise NotImplementedError

            has_start = start is not None
            has_end = end is not None
            has_step = step is not None

            def func(x, *args):
                # Unpack only the slice components that were actually
                # provided, in (start, end, step) order.
                idx = 0
                s, e, t = None, None, None
                for i in range(3):
                    if i == 0 and has_start:
                        s = args[idx]
                        idx += 1
                    if i == 1 and has_end:
                        e = args[idx]
                        idx += 1
                    if i == 2 and has_step:
                        t = args[idx]
                        idx += 1
                return x[s: e: t]

            inputs = expr.input, expr._start, expr._end, expr._step
            sub = self._gen_mapped_expr(expr, tuple(i for i in inputs if i is not None),
                                        func, expr.name, multiple=False)
            self._sub(expr, sub)
            return
        elif isinstance(expr, Swapcase):
            func = lambda x: x.swapcase()
        elif isinstance(expr, Title):
            func = lambda x: x.title()
        elif isinstance(expr, Strptime):
            date_format = expr.date_format

            def func(x):
                from datetime import datetime
                return datetime.strptime(x, date_format)
        else:
            if isinstance(expr, Isalnum):
                func = lambda x: x.isalnum()
            elif isinstance(expr, Isalpha):
                func = lambda x: x.isalpha()
            elif isinstance(expr, Isdigit):
                func = lambda x: x.isdigit()
            elif isinstance(expr, Isspace):
                func = lambda x: x.isspace()
            elif isinstance(expr, Islower):
                func = lambda x: x.islower()
            elif isinstance(expr, Isupper):
                func = lambda x: x.isupper()
            elif isinstance(expr, Istitle):
                func = lambda x: x.istitle()
            elif isinstance(expr, (Isnumeric, Isdecimal)):
                def u_safe(s):
                    # Best-effort coercion to unicode on Python 2; any
                    # failure (including NameError on Python 3) falls back
                    # to the original value.
                    try:
                        return unicode(s, "unicode_escape")
                    except:
                        return s
                if isinstance(expr, Isnumeric):
                    func = lambda x: u_safe(x).isnumeric()
                else:
                    func = lambda x: u_safe(x).isdecimal()

        if func is not None:
            sub = expr.input.map(func, expr.dtype)
            self._sub(expr, sub)
            return

        raise NotImplementedError

    def visit_reduction(self, expr):
        """Rewrite var/moment/skewness/kurtosis into supported aggregations."""
        if isinstance(expr, (Var, GroupedVar)):
            std = expr.input.std(ddof=expr._ddof)
            if isinstance(expr, GroupedVar):
                std = std.to_grouped_reduction(expr._grouped)
            sub = (std ** 2).rename(expr.name)
            self._sub(expr, sub)
            return
        elif isinstance(expr, (Moment, GroupedMoment)):
            order = expr._order
            center = expr._center

            sub = self._get_moment_sub_expr(expr, expr.input, order, center)
            sub = sub.rename(expr.name)
            self._sub(expr, sub)
            return
        elif isinstance(expr, (Skewness, GroupedSkewness)):
            std = expr.input.std(ddof=1)
            if isinstance(expr, GroupedSequenceReduction):
                std = std.to_grouped_reduction(expr._grouped)
            cnt = expr.input.count()
            if isinstance(expr, GroupedSequenceReduction):
                cnt = cnt.to_grouped_reduction(expr._grouped)
            sub = self._get_moment_sub_expr(expr, expr.input, 3, True) / (std ** 3)
            sub *= (cnt ** 2) / (cnt - 1) / (cnt - 2)
            sub = sub.rename(expr.name)
            self._sub(expr, sub)
            # FIX: return after substitution so the trailing
            # NotImplementedError only fires for unhandled reductions,
            # matching the Var/Moment branches above.
            return
        elif isinstance(expr, (Kurtosis, GroupedKurtosis)):
            std = expr.input.std(ddof=0)
            if isinstance(expr, GroupedSequenceReduction):
                std = std.to_grouped_reduction(expr._grouped)
            cnt = expr.input.count()
            if isinstance(expr, GroupedSequenceReduction):
                cnt = cnt.to_grouped_reduction(expr._grouped)
            m4 = self._get_moment_sub_expr(expr, expr.input, 4, True)
            sub = 1.0 / (cnt - 2) / (cnt - 3) * ((cnt * cnt - 1) * m4 / (std ** 4) - 3 * (cnt - 1) ** 2)
            sub = sub.rename(expr.name)
            self._sub(expr, sub)
            # FIX: see the note in the Skewness branch above.
            return

        raise NotImplementedError
| [
"numpy.radians",
"re.compile",
"datetime.datetime.strptime",
"numpy.arcsinh",
"numpy.arccosh",
"numpy.arctanh",
"numpy.degrees",
"datetime.timedelta"
] | [((12927, 12948), 're.compile', 're.compile', (['pat', 'flgs'], {}), '(pat, flgs)\n', (12937, 12948), False, 'import re\n'), ((13499, 13527), 're.compile', 're.compile', (['pat'], {'flags': 'flags'}), '(pat, flags=flags)\n', (13509, 13527), False, 'import re\n'), ((8437, 8462), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': 'l'}), '(milliseconds=l)\n', (8446, 8462), False, 'from datetime import datetime, timedelta\n'), ((8535, 8560), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': 'r'}), '(milliseconds=r)\n', (8544, 8560), False, 'from datetime import datetime, timedelta\n'), ((9685, 9698), 'numpy.arccosh', 'np.arccosh', (['x'], {}), '(x)\n', (9695, 9698), True, 'import numpy as np\n'), ((9845, 9858), 'numpy.arcsinh', 'np.arcsinh', (['x'], {}), '(x)\n', (9855, 9858), True, 'import numpy as np\n'), ((10005, 10018), 'numpy.arctanh', 'np.arctanh', (['x'], {}), '(x)\n', (10015, 10018), True, 'import numpy as np\n'), ((14499, 14527), 're.compile', 're.compile', (['pat'], {'flags': 'flags'}), '(pat, flags=flags)\n', (14509, 14527), False, 'import re\n'), ((10165, 10178), 'numpy.radians', 'np.radians', (['x'], {}), '(x)\n', (10175, 10178), True, 'import numpy as np\n'), ((10325, 10338), 'numpy.degrees', 'np.degrees', (['x'], {}), '(x)\n', (10335, 10338), True, 'import numpy as np\n'), ((15452, 15480), 're.compile', 're.compile', (['pat'], {'flags': 'flags'}), '(pat, flags=flags)\n', (15462, 15480), False, 'import re\n'), ((18696, 18729), 'datetime.datetime.strptime', 'datetime.strptime', (['x', 'date_format'], {}), '(x, date_format)\n', (18713, 18729), False, 'from datetime import datetime\n')] |
import typing
from scipy.interpolate import interp1d
import numpy as np
import slippy
from slippy.core import _SubModelABC
from slippy.core.materials import _IMMaterial
from slippy.core.influence_matrix_utils import bccg, plan_convolve
# TODO add from_offset option to get the displacement from the offset
class TangentialPartialSlip(_SubModelABC):
    """ Solves the partial slip problem
    Parameters
    ----------
    name: str
        The name of the sub model, used for debugging
    direction: {'x', 'y'}
        The direction of applied load or displacement, only 'x' and 'y' are currently supported
    load, displacement: float or sequence of floats
        Up to one can be supplied, either the total load or the rigid body displacement. Suitable values are:
        - float: indicating a constant load/ displacement
        - 2 by n array: of time points and load/ displacement values
        If an array is supplied and it is too short it is extrapolated by repeating the final value, this produces a
        warning. If neither are supplied this sub-model requires rigid_body_displacement to be provided by a further
        sub-model
    periodic_axes: 2 element sequence of bool, optional (False, False)
        True for each axis which the solution should be periodic in, should match solving step
    tol: float, optional (1e-7)
        The tolerance used to declare convergence for the bccg iterations
    max_it: int, optional (None)
        The maximum number of iterations for the bccg iterations, defaults to the same as the number of contact nodes
    """
    def __init__(self, name: str, direction: str,
                 load: typing.Union[float, typing.Sequence] = None,
                 displacement: typing.Union[float, typing.Sequence] = None,
                 periodic_axes: typing.Sequence[bool] = (False, False),
                 tol: float = 1e-7, max_it: int = None):
        # State items every call to solve() reads from the model
        requires = {'maximum_tangential_force', 'contact_nodes', 'time'}
        if load is None and displacement is None:
            # Neither driver supplied: another sub-model must provide the rigid
            # body displacement in this direction at each time step
            self.displacement_from_sub_model = True
            requires.add('rigid_body_displacement_' + direction)
            self.update_displacement = False
        else:
            self.displacement_from_sub_model = False
        provides = {'slip_distance', 'stick_nodes', 'loads_x', 'loads_y', 'total_displacement_x',
                    'total_displacement_y'}
        super().__init__(name, requires, provides)
        self.load_controlled = False
        if load is not None:
            if displacement is not None:
                raise ValueError("Either the load or the displacement can be set, not both")
            try:
                # Constant load: a plain float
                self.load = float(load)
                self.update_load = False
                self.load_upd = None
            except TypeError:
                # Time-varying load: row 0 = time points, row 1 = values;
                # interpolated (and extrapolated) at each solve time
                self.load = None
                self.load_upd = interp1d(load[0, :], load[1, :], fill_value='extrapolate')
                self.update_load = True
            self.load_controlled = True
        if displacement is not None:
            try:
                # Constant displacement: a plain float
                self.displacement = float(displacement)
                self.update_displacement = False
                self.displacement_upd = None
            except TypeError:
                # Time-varying displacement, same 2 by n convention as load
                self.displacement = None
                self.displacement_upd = interp1d(displacement[0, :], displacement[1, :], fill_value='extrapolate')
                self.update_displacement = True
        # Influence matrix component name, e.g. 'xx' or 'yy'
        self.component = direction * 2
        self._last_span = None
        self._pre_solve_checks = False
        # Cached influence matrices for each surface and their sum
        self._im_1 = None
        self._im_2 = None
        self._im_total = None
        self._periodic_axes = periodic_axes
        self._tol = tol
        self._max_it = max_it
        # Warm-start guess for the bccg iterations
        # NOTE(review): previous_result is never written in this class; it
        # presumably gets set externally or in a subclass - confirm
        self.previous_result = None
    def _check(self, span):
        # check that both are im materials and store ims
        # Builds and caches the combined influence matrix for the given grid
        # span; re-run whenever the contact grid shape changes.
        if isinstance(self.model.surface_1.material, _IMMaterial) and \
                isinstance(self.model.surface_2.material, _IMMaterial):
            im_1 = self.model.surface_1.material.influence_matrix([self.component],
                                                                  [self.model.surface_1.grid_spacing] * 2,
                                                                  span)[self.component]
            # NOTE(review): surface_1.grid_spacing is used for both surfaces -
            # presumably both surfaces share one grid; confirm
            im_2 = self.model.surface_2.material.influence_matrix([self.component],
                                                                  [self.model.surface_1.grid_spacing] * 2,
                                                                  span)[self.component]
            self._im_1 = im_1
            self._im_2 = im_2
            # Total compliance is the sum of the two surface influence matrices
            self._im_total = im_1 + im_2
            self._pre_solve_checks = True
        else:
            raise ValueError("This sub model only supports influence matrix based materials")
    def solve(self, current_state: dict) -> dict:
        """Solve the displacement controlled partial slip problem for the
        current contact state and return the updated state dict.

        Load controlled solving is not implemented and raises
        NotImplementedError.
        """
        span = current_state['maximum_tangential_force'].shape
        # (Re)build cached influence matrices if the grid shape changed
        if not self._pre_solve_checks or span != self._last_span:
            self._check(span)
            self._last_span = span
        domain = current_state['contact_nodes']
        # Convolution restricted to the contact nodes
        conv_func = plan_convolve(self._im_total, self._im_total, domain,
                                  circular=self._periodic_axes)
        # if the displacements are provided by another sub model or we have a set displacement we just have one set
        # of bccg iterations:
        if not self.load_controlled:
            if self.update_displacement:
                set_displacement = self.displacement_upd(current_state['time'])
            elif self.displacement_from_sub_model:
                set_displacement = current_state['rigid_body_displacement_' + self.component[0]]
            else:
                set_displacement = self.displacement
            try:
                # Broadcast a scalar displacement over the whole grid; a full
                # field passes through unchanged (TypeError path)
                set_displacement = float(set_displacement)*np.ones_like(current_state['maximum_tangential_force'])
            except TypeError:
                pass
            x0 = self.previous_result if self.previous_result is not None else \
                current_state['maximum_tangential_force']/2
            # Tangential tractions are bounded by +/- the local traction limit
            min_pressure = np.array(-1*current_state['maximum_tangential_force'][domain])
            loads_in_domain, failed = bccg(conv_func, set_displacement[domain], self._tol,
                                          self._max_it, x0[domain],
                                          min_pressure,
                                          current_state['maximum_tangential_force'][domain])
            loads_in_domain = slippy.asnumpy(loads_in_domain)
            full_loads = np.zeros_like(current_state['maximum_tangential_force'])
            full_loads[domain] = loads_in_domain
            # Nodes stick where the traction is below 99% of the limit
            stick_nodes = np.logical_and(domain, full_loads < (0.99 * current_state['maximum_tangential_force']))
            current_state['stick_nodes'] = stick_nodes
            tangential_deformation = slippy.asnumpy(conv_func(loads_in_domain, True))
            current_state['loads_' + self.component[0]] = full_loads
            # Accumulate displacement if an earlier sub-model already wrote it
            if 'total_displacement_' + self.component[0] in current_state:
                current_state['total_displacement_' + self.component[0]] += tangential_deformation
            else:
                current_state['total_displacement_' + self.component[0]] = tangential_deformation
            # Slip is the unaccommodated part of the rigid body motion;
            # zero on stick nodes and outside the contact
            slip_distance = set_displacement-tangential_deformation
            slip_distance[stick_nodes] = 0
            slip_distance[np.logical_not(domain)] = 0
            current_state['slip_distance'] = slip_distance
            return current_state
        else:
            raise NotImplementedError('Load controlled partial slip is not yet implemented')
| [
"slippy.asnumpy",
"numpy.ones_like",
"numpy.logical_and",
"slippy.core.influence_matrix_utils.bccg",
"numpy.logical_not",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.zeros_like",
"slippy.core.influence_matrix_utils.plan_convolve"
] | [((5150, 5238), 'slippy.core.influence_matrix_utils.plan_convolve', 'plan_convolve', (['self._im_total', 'self._im_total', 'domain'], {'circular': 'self._periodic_axes'}), '(self._im_total, self._im_total, domain, circular=self.\n _periodic_axes)\n', (5163, 5238), False, 'from slippy.core.influence_matrix_utils import bccg, plan_convolve\n'), ((6142, 6206), 'numpy.array', 'np.array', (["(-1 * current_state['maximum_tangential_force'][domain])"], {}), "(-1 * current_state['maximum_tangential_force'][domain])\n", (6150, 6206), True, 'import numpy as np\n'), ((6243, 6391), 'slippy.core.influence_matrix_utils.bccg', 'bccg', (['conv_func', 'set_displacement[domain]', 'self._tol', 'self._max_it', 'x0[domain]', 'min_pressure', "current_state['maximum_tangential_force'][domain]"], {}), "(conv_func, set_displacement[domain], self._tol, self._max_it, x0[\n domain], min_pressure, current_state['maximum_tangential_force'][domain])\n", (6247, 6391), False, 'from slippy.core.influence_matrix_utils import bccg, plan_convolve\n'), ((6546, 6577), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_in_domain'], {}), '(loads_in_domain)\n', (6560, 6577), False, 'import slippy\n'), ((6603, 6659), 'numpy.zeros_like', 'np.zeros_like', (["current_state['maximum_tangential_force']"], {}), "(current_state['maximum_tangential_force'])\n", (6616, 6659), True, 'import numpy as np\n'), ((6735, 6825), 'numpy.logical_and', 'np.logical_and', (['domain', "(full_loads < 0.99 * current_state['maximum_tangential_force'])"], {}), "(domain, full_loads < 0.99 * current_state[\n 'maximum_tangential_force'])\n", (6749, 6825), True, 'import numpy as np\n'), ((7462, 7484), 'numpy.logical_not', 'np.logical_not', (['domain'], {}), '(domain)\n', (7476, 7484), True, 'import numpy as np\n'), ((2896, 2954), 'scipy.interpolate.interp1d', 'interp1d', (['load[0, :]', 'load[1, :]'], {'fill_value': '"""extrapolate"""'}), "(load[0, :], load[1, :], fill_value='extrapolate')\n", (2904, 2954), False, 'from scipy.interpolate 
import interp1d\n'), ((3351, 3425), 'scipy.interpolate.interp1d', 'interp1d', (['displacement[0, :]', 'displacement[1, :]'], {'fill_value': '"""extrapolate"""'}), "(displacement[0, :], displacement[1, :], fill_value='extrapolate')\n", (3359, 3425), False, 'from scipy.interpolate import interp1d\n'), ((5867, 5922), 'numpy.ones_like', 'np.ones_like', (["current_state['maximum_tangential_force']"], {}), "(current_state['maximum_tangential_force'])\n", (5879, 5922), True, 'import numpy as np\n')] |
from pymatgen.core.structure import Molecule
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import PointGroupAnalyzer
from pymatgen.symmetry.analyzer import generate_full_symmops
import numpy as np
from numpy.linalg import eigh
from numpy.linalg import det
from copy import deepcopy
from math import fabs
from random import random
from random import choice as choose
from crystallography.operations import *
from crystallography.crystal import get_wyckoff_symmetry
try:
    from ase.build import molecule as ase_molecule

    def get_ase_mol(molname):
        """Build the named molecule with ASE and convert it to a pymatgen Molecule.

        Args:
            molname: a name understood by ase.build.molecule, e.g. "H2O".

        Returns:
            pymatgen.core.structure.Molecule with the same symbols/positions.
        """
        ase_mol = ase_molecule(molname)
        pos = ase_mol.get_positions()
        symbols = ase_mol.get_chemical_symbols()
        return Molecule(symbols, pos)
# Fixed: the original bare `except:` also swallowed SystemExit /
# KeyboardInterrupt and hid unrelated errors inside ase; only a missing
# ASE install should be tolerated here.
except ImportError:
    print("Could not import ASE. Install ASE for additional molecular support:")
    print("https://wiki.fysik.dtu.dk/ase/install.html")
identity = np.array([[1,0,0],[0,1,0],[0,0,1]])
inversion = np.array([[-1,0,0],[0,-1,0],[0,0,-1]])
def get_inertia_tensor(mol):
    """Return the symmetric 3x3 inertia tensor of a Molecule object.

    The molecule is centred about its centroid first; each site is weighted
    by its atomic number (``specie.number``).
    """
    centered = mol.get_centered_molecule()
    # Accumulate the six independent tensor components
    xx = yy = zz = xy = xz = yz = 0.0
    for idx, (x, y, z) in enumerate(centered.cart_coords):
        w = centered[idx].specie.number
        xx += w * (y * y + z * z)
        yy += w * (x * x + z * z)
        zz += w * (x * x + y * y)
        xy -= w * x * y
        xz -= w * x * z
        yz -= w * y * z
    return np.array([[xx, xy, xz],
                     [xy, yy, yz],
                     [xz, yz, zz]])
def get_moment_of_inertia(mol, axis, scale=1.0):
    """Return the moment of inertia of a molecule about an axis.

    scale rescales the molecule before the moment is taken; it is used to
    compare symmetry axes for equivalence. Defaults to 1.
    """
    # Normalise the rotation axis to a unit vector
    unit_axis = axis / np.linalg.norm(axis)
    # Each site contributes the square of its (scaled) perpendicular
    # distance from the axis, |axis x v|.
    return sum(
        (scale * np.linalg.norm(np.cross(unit_axis, site.coords))) ** 2
        for site in mol
    )
def reoriented_molecule(mol, nested=False):
    '''
    Return a molecule reoriented so that its principle axes
    are aligned with the identity matrix, and the matrix P
    used to rotate the molecule into this orientation

    Note: `nested` is currently unused in this implementation.
    '''
    def reorient(mol):
        # One reorientation step: rotate the centred molecule so the
        # eigenvectors of its inertia tensor line up with the Cartesian axes.
        new_mol = mol.get_centered_molecule()
        A = get_inertia_tensor(new_mol)
        #Store the eigenvectors of the inertia tensor
        P = np.transpose(eigh(A)[1])
        if det(P) < 0:
            # Keep P a proper rotation (det +1), not a reflection
            P[0] *= -1
        #reorient the molecule
        P = SymmOp.from_rotation_and_translation(P,[0,0,0])
        new_mol.apply_operation(P)
        #Our molecule should never be inverted during reorientation.
        if det(P.rotation_matrix) < 0:
            print("Error: inverted reorientation applied.")
        return new_mol, P
    #If needed, recursively apply reorientation (due to numerical errors)
    iterations = 1
    max_iterations = 100
    new_mol, P = reorient(mol)
    while iterations < max_iterations:
        # Verify the inertia tensor is now diagonalised to numerical
        # precision; if not, apply another reorientation step.
        is_okay = True
        for i in range(3):
            for j in range(3):
                x = eigh(get_inertia_tensor(new_mol))[1][i][j]
                okay = True
                if i == j:
                    #Check that diagonal elements are 0 or 1
                    if (not np.isclose(x, 0)) and (not np.isclose(x, 1)):
                        okay = False
                else:
                    #Check that off-diagonal elements are 0
                    if (not np.isclose(x, 0)):
                        okay = False
            if okay is False:
                #If matrix is not diagonal with 1's and/or 0's, reorient
                # Compose the cumulative rotation so P maps the original
                # molecule straight to the final orientation
                new_mol, Q = reorient(new_mol)
                P = Q*P
                iterations += 1
            elif okay is True:
                break
    if iterations == max_iterations:
        print("Error: Could not reorient molecule after "+str(max_iterations)+" attempts")
        print(new_mol)
        print(get_inertia_tensor(new_mol))
        return False
    return new_mol, P
def get_symmetry(mol, already_oriented=False):
    '''
    Return a list of SymmOps for a molecule's point symmetry
    already_oriented: whether or not the principle axes of mol are already reoriented
    '''
    pga = PointGroupAnalyzer(mol)
    #Handle linear molecules
    # A '*' in the Schoenflies symbol (C*v / D*h) marks infinite rotational
    # symmetry, which pymatgen cannot enumerate directly; a finite 12-fold
    # rotation plus mirrors is substituted below.
    if '*' in pga.sch_symbol:
        if already_oriented == False:
            #Reorient the molecule
            oriented_mol, P = reoriented_molecule(mol)
            pga = PointGroupAnalyzer(oriented_mol)
        pg = pga.get_pointgroup()
        symm_m = []
        for op in pg:
            symm_m.append(op)
        #Add 12-fold and reflections in place of ininitesimal rotation
        for axis in [[1,0,0],[0,1,0],[0,0,1]]:
            # Try a pi/6 rotation about each Cartesian axis to find the
            # molecular axis of the (reoriented) linear molecule
            op = SymmOp.from_rotation_and_translation(aa2matrix(axis, pi/6), [0,0,0])
            if pga.is_valid_op(op):
                symm_m.append(op)
                #Any molecule with infinitesimal symmetry is linear;
                #Thus, it possess mirror symmetry for any axis perpendicular
                #To the rotational axis. pymatgen does not add this symmetry
                #for all linear molecules - for example, hydrogen
                if axis == [1,0,0]:
                    symm_m.append(SymmOp.from_xyz_string('x,-y,z'))
                    symm_m.append(SymmOp.from_xyz_string('x,y,-z'))
                    r = SymmOp.from_xyz_string('-x,y,-z')
                    '''if pga.is_valid_op(r):
                        symm_m.append(r)'''
                elif axis == [0,1,0]:
                    symm_m.append(SymmOp.from_xyz_string('-x,y,z'))
                    symm_m.append(SymmOp.from_xyz_string('x,y,-z'))
                    r = SymmOp.from_xyz_string('-x,-y,z')
                    '''if pga.is_valid_op(r):
                        symm_m.append(r)'''
                elif axis == [0,0,1]:
                    symm_m.append(SymmOp.from_xyz_string('-x,y,z'))
                    symm_m.append(SymmOp.from_xyz_string('x,-y,z'))
                    r = SymmOp.from_xyz_string('x,-y,-z')
                    '''if pga.is_valid_op(r):
                        symm_m.append(r)'''
                #Generate a full list of SymmOps for the molecule's pointgroup
                symm_m = generate_full_symmops(symm_m, 1e-3)
                break
        #Reorient the SymmOps into mol's original frame
        if not already_oriented:
            new = []
            for op in symm_m:
                # Conjugate each op by P so it acts in the original frame
                new.append(P.inverse*op*P)
            return new
        elif already_oriented:
            return symm_m
    #Handle nonlinear molecules
    else:
        pg = pga.get_pointgroup()
        symm_m = []
        for op in pg:
            symm_m.append(op)
        return symm_m
def orientation_in_wyckoff_position(mol, sg, index, randomize=True,
    exact_orientation=False, already_oriented=False, allow_inversion=False):
    '''
    Tests if a molecule meets the symmetry requirements of a Wyckoff position.
    If it does, return the rotation matrix needed. Otherwise, returns False.
    args:
        mol: pymatgen.core.structure.Molecule object. Orientation is arbitrary
        sg: the spacegroup to check
        index: the index of the Wyckoff position within the sg to check
        randomize: whether or not to apply a random rotation consistent with
            the symmetry requirements.
        exact_orientation: whether to only check compatibility for the provided
            orientation of the molecule. Used within general case for checking.
            If True, this function only returns True or False
        already_oriented: whether or not to reorient the principle axes
            when calling get_symmetry. Setting to True can remove redundancy,
            but is not necessary.
        allow_inversion: if False, a chiral molecule is rejected from any
            spacegroup whose general position contains improper operations.
    '''
    #Obtain the Wyckoff symmetry
    symm_w = get_wyckoff_symmetry(sg, molecular=True)[index][0]
    pga = PointGroupAnalyzer(mol)
    #Check exact orientation
    # Fast path: just test whether every Wyckoff SymmOp is already a valid
    # symmetry of mol as given; used recursively at the end of this function.
    if exact_orientation is True:
        mo = deepcopy(mol)
        valid = True
        for op in symm_w:
            if not pga.is_valid_op(op):
                valid = False
        if valid is True:
            return True
        elif valid is False:
            return False
    #Obtain molecular symmetry, exact_orientation==False
    symm_m = get_symmetry(mol, already_oriented=already_oriented)
    #Store OperationAnalyzer objects for each molecular SymmOp
    # The molecule is chiral iff its point group has no improper operations
    chiral = True
    opa_m = []
    for op_m in symm_m:
        opa = OperationAnalyzer(op_m)
        opa_m.append(opa)
        if opa.type == "rotoinversion":
            chiral = False
        elif opa.type == "inversion":
            chiral = False
    #If molecule is chiral and allow_inversion is False,
    #check if WP breaks symmetry
    if chiral is True:
        if allow_inversion is False:
            gen_pos = get_wyckoffs(sg)[0]
            for op in gen_pos:
                # Negative determinant means an improper operation, which
                # would superimpose the molecule onto its mirror image
                if np.linalg.det(op.rotation_matrix) < 0:
                    print("Warning: cannot place chiral molecule in spagegroup #"+str(sg))
                    return False
    #Store OperationAnalyzer objects for each Wyckoff symmetry SymmOp
    opa_w = []
    for op_w in symm_w:
        opa_w.append(OperationAnalyzer(op_w))
    #Check for constraints from the Wyckoff symmetry...
    #If we find ANY two constraints (SymmOps with unique axes), the molecule's
    #point group MUST contain SymmOps which can be aligned to these particular
    #constraints. However, there may be multiple compatible orientations of the
    #molecule consistent with these constraints
    constraint1 = None
    constraint2 = None
    for i, op_w in enumerate(symm_w):
        if opa_w[i].axis is not None:
            constraint1 = opa_w[i]
            for j, op_w in enumerate(symm_w):
                if opa_w[j].axis is not None:
                    dot = np.dot(opa_w[i].axis, opa_w[j].axis)
                    # A second constraint must have a genuinely different
                    # (non-parallel, non-antiparallel) axis
                    if (not np.isclose(dot, 1, rtol=.01)) and (not np.isclose(dot, -1, rtol=.01)):
                        constraint2 = opa_w[j]
                        break
            break
    #Indirectly store the angle between the constraint axes
    if (constraint1 is not None
        and constraint2 is not None):
        dot_w = np.dot(constraint1.axis, constraint2.axis)
    #Generate 1st consistent molecular constraints
    # A molecular operation is a candidate if it is conjugate to the first
    # Wyckoff constraint; both axis directions must be considered.
    constraints_m = []
    if constraint1 is not None:
        for i, opa1 in enumerate(opa_m):
            if opa1.is_conjugate(constraint1):
                constraints_m.append([opa1, []])
                #Generate 2nd constraint in opposite direction
                extra = deepcopy(opa1)
                extra.axis = [opa1.axis[0]*-1, opa1.axis[1]*-1, opa1.axis[2]*-1]
                constraints_m.append([extra, []])
    #Remove redundancy for the first constraints
    list_i = list(range(len(constraints_m)))
    list_j = list(range(len(constraints_m)))
    copy = deepcopy(constraints_m)
    for i , c1 in enumerate(copy):
        if i in list_i:
            for j , c2 in enumerate(copy):
                if i > j and j in list_j and j in list_i:
                    #Check if axes are colinear
                    if np.isclose(np.dot(c1[0].axis, c2[0].axis), 1, rtol=.01):
                        list_i.remove(j)
                        list_j.remove(j)
                    else:# np.isclose(np.dot(c1[0].axis, c2[0].axis), -1, rtol=.01):
                        # Axes differ: the pair is still redundant if some
                        # molecular rotation maps one axis onto the other
                        cond1 = False
                        cond2 = False
                        for opa in opa_m:
                            if opa.type == "rotation":
                                op = opa.op
                                if np.isclose(np.dot(op.operate(c1[0].axis), c2[0].axis), 1, rtol=.05):
                                    cond1 = True
                                    break
                        if cond1 is True: # or cond2 is True:
                            list_i.remove(j)
                            list_j.remove(j)
    c_m = deepcopy(constraints_m)
    constraints_m = []
    for i in list_i:
        constraints_m.append(c_m[i])
    #Generate 2nd consistent molecular constraints
    valid = list(range(len(constraints_m)))
    if constraint2 is not None:
        for i, c in enumerate(constraints_m):
            opa1 = c[0]
            for j, opa2 in enumerate(opa_m):
                if opa2.is_conjugate(constraint2):
                    dot_m = np.dot(opa1.axis, opa2.axis)
                    #Ensure that the angles are equal
                    if abs(dot_m - dot_w) < .02 or abs(dot_m + dot_w) < .02:
                        constraints_m[i][1].append(opa2)
                        #Generate 2nd constraint in opposite direction
                        extra = deepcopy(opa2)
                        extra.axis = [opa2.axis[0]*-1, opa2.axis[1]*-1, opa2.axis[2]*-1]
                        constraints_m[i][1].append(extra)
            #If no consistent constraints are found, remove first constraint
            if constraints_m[i][1] == []:
                valid.remove(i)
    copy = deepcopy(constraints_m)
    constraints_m = []
    for i in valid:
        constraints_m.append(copy[i])
    #Generate orientations consistent with the possible constraints
    orientations = []
    #Loop over molecular constraint sets
    for c1 in constraints_m:
        # T rotates the first molecular axis onto the first Wyckoff axis
        v1 = c1[0].axis
        v2 = constraint1.axis
        T = rotate_vector(v1, v2)
        #Loop over second molecular constraints
        for opa in c1[1]:
            phi = angle(constraint1.axis, constraint2.axis)
            phi2 = angle(constraint1.axis, np.dot(T, opa.axis))
            if isclose(phi, phi2, rtol=.01):
                # Solve for the extra rotation about constraint1.axis (chord
                # length c on a circle of radius r = sin(phi)) that brings the
                # second molecular axis onto the second Wyckoff axis
                r = np.sin(phi)
                c = np.linalg.norm(np.dot(T, opa.axis) - constraint2.axis)
                theta = np.arccos(1 - (c**2)/(2*(r**2)))
                R = aa2matrix(constraint1.axis, theta)
                T2 = np.dot(R, T)
                a = angle(np.dot(T2, opa.axis), constraint2.axis)
                if not np.isclose(a, 0, rtol=.01):
                    # Wrong rotation sense: retry with the inverse rotation
                    T2 = np.dot(np.linalg.inv(R), T)
                    a = angle(np.dot(T2, opa.axis), constraint2.axis)
                    if not np.isclose(a, 0, rtol=.01):
                        print("Error: Generated incorrect rotation: "+str(theta))
                o = orientation(T2, degrees=0)
                orientations.append(o)
        #If there is only one constraint
        if c1[1] == []:
            # One free rotational degree of freedom about the constraint axis
            o = orientation(T, degrees=1, axis=constraint1.axis)
            orientations.append(o)
    #Ensure the identity orientation is checked if no constraints are found
    if constraints_m == []:
        o = orientation(np.identity(3), degrees=2)
        orientations.append(o)
    #Remove redundancy from orientations
    # Two orientations are redundant if they differ only by a symmetry
    # operation of the molecule itself (checked in mol's own frame).
    list_i = list(range(len(orientations)))
    list_j = list(range(len(orientations)))
    for i , o1 in enumerate(orientations):
        if i in list_i:
            for j , o2 in enumerate(orientations):
                if i > j and j in list_j and j in list_i:
                    m1 = o1.get_matrix(angle=0)
                    m2 = o2.get_matrix(angle=0)
                    new_op = SymmOp.from_rotation_and_translation(np.dot(m2, np.linalg.inv(m1)), [0,0,0])
                    P = SymmOp.from_rotation_and_translation(np.linalg.inv(m1), [0,0,0])
                    old_op = P*new_op*P.inverse
                    if pga.is_valid_op(old_op):
                        list_i.remove(j)
                        list_j.remove(j)
    copy = deepcopy(orientations)
    orientations = []
    for i in list_i:
        orientations.append(copy[i])
    #Check each of the found orientations for consistency with the Wyckoff pos.
    #If consistent, put into an array of valid orientations
    allowed = []
    for o in orientations:
        if randomize is True:
            op = o.get_op()
        elif randomize is False:
            op = o.get_op(angle=0)
        mo = deepcopy(mol)
        mo.apply_operation(op)
        # Recursive call with exact_orientation=True validates the candidate
        if orientation_in_wyckoff_position(mo, sg, index, exact_orientation=True, already_oriented=already_oriented) is True:
            allowed.append(o)
    #Return the array of allowed orientations. If there are none, return False
    if allowed == []:
        return False
    else:
        return allowed
#Test Functionality
if __name__ == "__main__":
    #---------------------------------------------------
    #Test cases: water, methane, and c60 via pymatgen
    h2o = Molecule.from_file('xyz/water.xyz')
    pga_h2o = PointGroupAnalyzer(h2o)
    pg_h2o = pga_h2o.get_pointgroup()
    #Testing water. Fixed: the original called `get_ase_molecule`, which is
    #not defined anywhere (the helper defined above is `get_ase_mol`), so the
    #script crashed with a NameError before doing any work.
    mol = get_ase_mol("H2O")
    print("Original molecule:")
    print(mol)
    print()
    #Apply random rotation to avoid lucky results
    R = aa2matrix(1, 1, random=True)
    R_op = SymmOp.from_rotation_and_translation(R, [0, 0, 0])
    mol.apply_operation(R_op)
    print("Rotated molecule:")
    print(mol)
    print()
    pga = PointGroupAnalyzer(mol)
    mol = pga.symmetrize_molecule()['sym_mol']
    #orientation_in_wyckoff_position(mol, sg, WP's index in sg)
    #returns a list of orientations consistent with the WP's symmetry.
    #We can choose any of these orientations at random using np.random.choice
    #To use an orientation, do mol.apply_operation(orientation)
    #Spacegroup WP 24l (index 2) in sg 221 has m.. symmetry
    allowed = orientation_in_wyckoff_position(mol, 221, 2, randomize=True)
    #Fixed: the message said "position 1g", which contradicted the tested
    #position (24l, index 2) documented just above.
    print("Found "+str(len(allowed))+" orientations in sg 221 position 24l:")
    '''for orientation in allowed:
        mo = deepcopy(mol)
        mo.apply_operation(orientation.get_op)
        print()
        print(mo)'''
| [
"numpy.arccos",
"pymatgen.core.structure.Molecule",
"numpy.array",
"pymatgen.core.operations.SymmOp.from_xyz_string",
"numpy.linalg.norm",
"numpy.sin",
"copy.deepcopy",
"pymatgen.core.structure.Molecule.from_file",
"numpy.cross",
"numpy.dot",
"numpy.linalg.eigh",
"numpy.identity",
"pymatgen.... | [((958, 1001), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (966, 1001), True, 'import numpy as np\n'), ((1006, 1052), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {}), '([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])\n', (1014, 1052), True, 'import numpy as np\n'), ((1582, 1643), 'numpy.array', 'np.array', (['[[I11, I12, I13], [I12, I22, I23], [I13, I23, I33]]'], {}), '([[I11, I12, I13], [I12, I22, I23], [I13, I23, I33]])\n', (1590, 1643), True, 'import numpy as np\n'), ((4373, 4396), 'pymatgen.symmetry.analyzer.PointGroupAnalyzer', 'PointGroupAnalyzer', (['mol'], {}), '(mol)\n', (4391, 4396), False, 'from pymatgen.symmetry.analyzer import PointGroupAnalyzer\n'), ((8009, 8032), 'pymatgen.symmetry.analyzer.PointGroupAnalyzer', 'PointGroupAnalyzer', (['mol'], {}), '(mol)\n', (8027, 8032), False, 'from pymatgen.symmetry.analyzer import PointGroupAnalyzer\n'), ((11011, 11034), 'copy.deepcopy', 'deepcopy', (['constraints_m'], {}), '(constraints_m)\n', (11019, 11034), False, 'from copy import deepcopy\n'), ((12077, 12100), 'copy.deepcopy', 'deepcopy', (['constraints_m'], {}), '(constraints_m)\n', (12085, 12100), False, 'from copy import deepcopy\n'), ((13148, 13171), 'copy.deepcopy', 'deepcopy', (['constraints_m'], {}), '(constraints_m)\n', (13156, 13171), False, 'from copy import deepcopy\n'), ((15590, 15612), 'copy.deepcopy', 'deepcopy', (['orientations'], {}), '(orientations)\n', (15598, 15612), False, 'from copy import deepcopy\n'), ((16538, 16573), 'pymatgen.core.structure.Molecule.from_file', 'Molecule.from_file', (['"""xyz/water.xyz"""'], {}), "('xyz/water.xyz')\n", (16556, 16573), False, 'from pymatgen.core.structure import Molecule\n'), ((16588, 16611), 'pymatgen.symmetry.analyzer.PointGroupAnalyzer', 'PointGroupAnalyzer', (['h2o'], {}), '(h2o)\n', (16606, 16611), False, 'from pymatgen.symmetry.analyzer import PointGroupAnalyzer\n'), ((16897, 16947), 
'pymatgen.core.operations.SymmOp.from_rotation_and_translation', 'SymmOp.from_rotation_and_translation', (['R', '[0, 0, 0]'], {}), '(R, [0, 0, 0])\n', (16933, 16947), False, 'from pymatgen.core.operations import SymmOp\n'), ((17044, 17067), 'pymatgen.symmetry.analyzer.PointGroupAnalyzer', 'PointGroupAnalyzer', (['mol'], {}), '(mol)\n', (17062, 17067), False, 'from pymatgen.symmetry.analyzer import PointGroupAnalyzer\n'), ((653, 674), 'ase.build.molecule', 'ase_molecule', (['molname'], {}), '(molname)\n', (665, 674), True, 'from ase.build import molecule as ase_molecule\n'), ((777, 799), 'pymatgen.core.structure.Molecule', 'Molecule', (['symbols', 'pos'], {}), '(symbols, pos)\n', (785, 799), False, 'from pymatgen.core.structure import Molecule\n'), ((1983, 2003), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (1997, 2003), True, 'import numpy as np\n'), ((2681, 2731), 'pymatgen.core.operations.SymmOp.from_rotation_and_translation', 'SymmOp.from_rotation_and_translation', (['P', '[0, 0, 0]'], {}), '(P, [0, 0, 0])\n', (2717, 2731), False, 'from pymatgen.core.operations import SymmOp\n'), ((8110, 8123), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (8118, 8123), False, 'from copy import deepcopy\n'), ((10341, 10383), 'numpy.dot', 'np.dot', (['constraint1.axis', 'constraint2.axis'], {}), '(constraint1.axis, constraint2.axis)\n', (10347, 10383), True, 'import numpy as np\n'), ((16017, 16030), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (16025, 16030), False, 'from copy import deepcopy\n'), ((2603, 2609), 'numpy.linalg.det', 'det', (['P'], {}), '(P)\n', (2606, 2609), False, 'from numpy.linalg import det\n'), ((2844, 2866), 'numpy.linalg.det', 'det', (['P.rotation_matrix'], {}), '(P.rotation_matrix)\n', (2847, 2866), False, 'from numpy.linalg import det\n'), ((4602, 4634), 'pymatgen.symmetry.analyzer.PointGroupAnalyzer', 'PointGroupAnalyzer', (['oriented_mol'], {}), '(oriented_mol)\n', (4620, 4634), False, 'from 
pymatgen.symmetry.analyzer import PointGroupAnalyzer\n'), ((7948, 7988), 'crystallography.crystal.get_wyckoff_symmetry', 'get_wyckoff_symmetry', (['sg'], {'molecular': '(True)'}), '(sg, molecular=True)\n', (7968, 7988), False, 'from crystallography.crystal import get_wyckoff_symmetry\n'), ((14742, 14756), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (14753, 14756), True, 'import numpy as np\n'), ((2580, 2587), 'numpy.linalg.eigh', 'eigh', (['A'], {}), '(A)\n', (2584, 2587), False, 'from numpy.linalg import eigh\n'), ((6373, 6409), 'pymatgen.symmetry.analyzer.generate_full_symmops', 'generate_full_symmops', (['symm_m', '(0.001)'], {}), '(symm_m, 0.001)\n', (6394, 6409), False, 'from pymatgen.symmetry.analyzer import generate_full_symmops\n'), ((10714, 10728), 'copy.deepcopy', 'deepcopy', (['opa1'], {}), '(opa1)\n', (10722, 10728), False, 'from copy import deepcopy\n'), ((13679, 13698), 'numpy.dot', 'np.dot', (['T', 'opa.axis'], {}), '(T, opa.axis)\n', (13685, 13698), True, 'import numpy as np\n'), ((13765, 13776), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (13771, 13776), True, 'import numpy as np\n'), ((13876, 13912), 'numpy.arccos', 'np.arccos', (['(1 - c ** 2 / (2 * r ** 2))'], {}), '(1 - c ** 2 / (2 * r ** 2))\n', (13885, 13912), True, 'import numpy as np\n'), ((13985, 13997), 'numpy.dot', 'np.dot', (['R', 'T'], {}), '(R, T)\n', (13991, 13997), True, 'import numpy as np\n'), ((2114, 2131), 'numpy.cross', 'np.cross', (['axis', 'v'], {}), '(axis, v)\n', (2122, 2131), True, 'import numpy as np\n'), ((5501, 5534), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""-x,y,-z"""'], {}), "('-x,y,-z')\n", (5523, 5534), False, 'from pymatgen.core.operations import SymmOp\n'), ((9027, 9060), 'numpy.linalg.det', 'np.linalg.det', (['op.rotation_matrix'], {}), '(op.rotation_matrix)\n', (9040, 9060), True, 'import numpy as np\n'), ((9964, 10000), 'numpy.dot', 'np.dot', (['opa_w[i].axis', 'opa_w[j].axis'], {}), '(opa_w[i].axis, 
opa_w[j].axis)\n', (9970, 10000), True, 'import numpy as np\n'), ((12504, 12532), 'numpy.dot', 'np.dot', (['opa1.axis', 'opa2.axis'], {}), '(opa1.axis, opa2.axis)\n', (12510, 12532), True, 'import numpy as np\n'), ((14024, 14044), 'numpy.dot', 'np.dot', (['T2', 'opa.axis'], {}), '(T2, opa.axis)\n', (14030, 14044), True, 'import numpy as np\n'), ((14087, 14114), 'numpy.isclose', 'np.isclose', (['a', '(0)'], {'rtol': '(0.01)'}), '(a, 0, rtol=0.01)\n', (14097, 14114), True, 'import numpy as np\n'), ((14194, 14214), 'numpy.dot', 'np.dot', (['T2', 'opa.axis'], {}), '(T2, opa.axis)\n', (14200, 14214), True, 'import numpy as np\n'), ((14257, 14284), 'numpy.isclose', 'np.isclose', (['a', '(0)'], {'rtol': '(0.01)'}), '(a, 0, rtol=0.01)\n', (14267, 14284), True, 'import numpy as np\n'), ((3627, 3643), 'numpy.isclose', 'np.isclose', (['x', '(0)'], {}), '(x, 0)\n', (3637, 3643), True, 'import numpy as np\n'), ((5375, 5407), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""x,-y,z"""'], {}), "('x,-y,z')\n", (5397, 5407), False, 'from pymatgen.core.operations import SymmOp\n'), ((5443, 5475), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""x,y,-z"""'], {}), "('x,y,-z')\n", (5465, 5475), False, 'from pymatgen.core.operations import SymmOp\n'), ((5823, 5856), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""-x,-y,z"""'], {}), "('-x,-y,z')\n", (5845, 5856), False, 'from pymatgen.core.operations import SymmOp\n'), ((11277, 11307), 'numpy.dot', 'np.dot', (['c1[0].axis', 'c2[0].axis'], {}), '(c1[0].axis, c2[0].axis)\n', (11283, 11307), True, 'import numpy as np\n'), ((12824, 12838), 'copy.deepcopy', 'deepcopy', (['opa2'], {}), '(opa2)\n', (12832, 12838), False, 'from copy import deepcopy\n'), ((13812, 13831), 'numpy.dot', 'np.dot', (['T', 'opa.axis'], {}), '(T, opa.axis)\n', (13818, 13831), True, 'import numpy as np\n'), ((14147, 14163), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], 
{}), '(R)\n', (14160, 14163), True, 'import numpy as np\n'), ((15373, 15390), 'numpy.linalg.inv', 'np.linalg.inv', (['m1'], {}), '(m1)\n', (15386, 15390), True, 'import numpy as np\n'), ((3434, 3450), 'numpy.isclose', 'np.isclose', (['x', '(0)'], {}), '(x, 0)\n', (3444, 3450), True, 'import numpy as np\n'), ((3461, 3477), 'numpy.isclose', 'np.isclose', (['x', '(1)'], {}), '(x, 1)\n', (3471, 3477), True, 'import numpy as np\n'), ((5697, 5729), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""-x,y,z"""'], {}), "('-x,y,z')\n", (5719, 5729), False, 'from pymatgen.core.operations import SymmOp\n'), ((5765, 5797), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""x,y,-z"""'], {}), "('x,y,-z')\n", (5787, 5797), False, 'from pymatgen.core.operations import SymmOp\n'), ((6145, 6178), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""x,-y,-z"""'], {}), "('x,-y,-z')\n", (6167, 6178), False, 'from pymatgen.core.operations import SymmOp\n'), ((10029, 10058), 'numpy.isclose', 'np.isclose', (['dot', '(1)'], {'rtol': '(0.01)'}), '(dot, 1, rtol=0.01)\n', (10039, 10058), True, 'import numpy as np\n'), ((10068, 10098), 'numpy.isclose', 'np.isclose', (['dot', '(-1)'], {'rtol': '(0.01)'}), '(dot, -1, rtol=0.01)\n', (10078, 10098), True, 'import numpy as np\n'), ((15283, 15300), 'numpy.linalg.inv', 'np.linalg.inv', (['m1'], {}), '(m1)\n', (15296, 15300), True, 'import numpy as np\n'), ((6019, 6051), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""-x,y,z"""'], {}), "('-x,y,z')\n", (6041, 6051), False, 'from pymatgen.core.operations import SymmOp\n'), ((6087, 6119), 'pymatgen.core.operations.SymmOp.from_xyz_string', 'SymmOp.from_xyz_string', (['"""x,-y,z"""'], {}), "('x,-y,z')\n", (6109, 6119), False, 'from pymatgen.core.operations import SymmOp\n')] |
import numpy as np
class Placeable:
    """Index of every possible word placement on a crossword-style grid.

    For each word and each orientation (``div`` 0 and 1, i.e. the two
    axes of the grid) this precomputes all top-left positions (``i``,
    ``j``) where the word fits inside a ``width`` x ``height`` grid, and
    keeps an inverse lookup table ``invP`` mapping
    ``(div, i, j, word_index)`` to the flat placement index.
    """
    def __init__(self, width, height, dic, msg=True):
        """Build the placement tables for every word in ``dic``.

        Parameters
        ----------
        width, height : int
            Grid dimensions.
        dic : object
            Dictionary-like object exposing ``word`` (str or list of
            str), ``name`` and ``size`` attributes.
        msg : bool, optional
            If truthy, print a short summary after construction.
        """
        self.size = 0
        self.width = width
        self.height = height
        self.div, self.i, self.j, self.k = [], [], [], []
        # The NaN fill value is cast (unsafely) into the int dtype and
        # acts as a "no placement" sentinel; the last axis grows by one
        # slot per word as batches are registered.
        self.invP = np.full((2, self.height, self.width, 0), np.nan, dtype="int")
        self._compute(dic.word)
        if msg:
            print(f"Imported Dictionary name: `{dic.name}`, size: {dic.size}")
            print(f"Placeable size : {self.size}")
    def _compute(self, word, baseK=0):
        """Register all placements for ``word`` starting at word index ``baseK``."""
        if isinstance(word, str):
            word = [word]
        # Fixed: compare ints by value (`==`/`!=`) instead of identity
        # (`is 0`), which relies on CPython small-int caching and emits a
        # SyntaxWarning on Python 3.8+.
        if self.size == 0 or baseK != 0:
            ap = np.full((2, self.height, self.width, len(word)), np.nan, dtype="int")
            self.invP = np.append(self.invP, ap, axis=3)
        for div in (0, 1):
            for k, w in enumerate(word):
                if div == 0:
                    # orientation 0: the word spans len(w) rows
                    iMax = self.height - len(w) + 1
                    jMax = self.width
                elif div == 1:
                    # orientation 1: the word spans len(w) columns
                    iMax = self.height
                    jMax = self.width - len(w) + 1
                for i in range(iMax):
                    for j in range(jMax):
                        self.invP[div, i, j, baseK + k] = len(self.div)
                        self.div.append(div)
                        self.i.append(i)
                        self.j.append(j)
                        self.k.append(baseK + k)
        self.size = len(self.k)
    def __len__(self):
        return self.size
    def __getitem__(self, key):
        """Return placement ``key`` as a dict, or a whole attribute by name.

        Fixed: ``np.int`` was removed in NumPy 1.24 (it would raise
        AttributeError here), and ``eval(f"self.{key}")`` allowed
        arbitrary code execution; use ``np.integer`` and ``getattr``.
        """
        if isinstance(key, (int, np.integer)):
            return {"div": self.div[key], "i": self.i[key], "j": self.j[key], "k": self.k[key]}
        if isinstance(key, str):
            return getattr(self, key)
| [
"numpy.append",
"numpy.full"
] | [((248, 309), 'numpy.full', 'np.full', (['(2, self.height, self.width, 0)', 'np.nan'], {'dtype': '"""int"""'}), "((2, self.height, self.width, 0), np.nan, dtype='int')\n", (255, 309), True, 'import numpy as np\n'), ((750, 782), 'numpy.append', 'np.append', (['self.invP', 'ap'], {'axis': '(3)'}), '(self.invP, ap, axis=3)\n', (759, 782), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from codecarbon.external.hardware import RAM
# TODO: need help: test multiprocess case
class TestRAM(unittest.TestCase):
    """Check that RAM power estimates track process memory allocations."""

    def test_ram_diff(self):
        tracker = RAM(tracking_mode="process")
        shapes = (
            # (10, 10), # too small to be noticed
            # (100, 100), # too small to be noticed
            (1000, 1000),  # ref for atol
            (10, 1000, 1000),
            (20, 1000, 1000),
            (100, 1000, 1000),
            (200, 1000, 1000),
            (1000, 1000, 1000),
            (2000, 1000, 1000),
        )
        for array_size in shapes:
            with self.subTest(array_size=array_size):
                # Power reported before vs. after allocating the array
                # should differ by the array's size in GB.
                before_W = tracker.total_power().W
                block = np.ones(array_size, dtype=np.int8)
                after_W = tracker.total_power().W
                n_gb = block.nbytes / (1000 ** 3)
                n_gb_W = (after_W - before_W) / tracker.power_per_GB
                is_close = np.isclose(n_gb, n_gb_W, atol=1e-3)
                self.assertTrue(
                    is_close,
                    msg=f"{array_size}, {n_gb}, {n_gb_W}, {is_close}",
                )
                del block
| [
"codecarbon.external.hardware.RAM",
"numpy.ones",
"numpy.isclose"
] | [((204, 232), 'codecarbon.external.hardware.RAM', 'RAM', ([], {'tracking_mode': '"""process"""'}), "(tracking_mode='process')\n", (207, 232), False, 'from codecarbon.external.hardware import RAM\n'), ((727, 761), 'numpy.ones', 'np.ones', (['array_size'], {'dtype': 'np.int8'}), '(array_size, dtype=np.int8)\n', (734, 761), True, 'import numpy as np\n'), ((943, 979), 'numpy.isclose', 'np.isclose', (['n_gb', 'n_gb_W'], {'atol': '(0.001)'}), '(n_gb, n_gb_W, atol=0.001)\n', (953, 979), True, 'import numpy as np\n')] |
#/ Type: DRS
#/ Name: Hf Isotopes Example
#/ Authors: <NAME> and <NAME>
#/ Description: A Hf isotopes example
#/ References: None
#/ Version: 1.0
#/ Contact: <EMAIL>
from iolite import QtGui
import numpy as np
def runDRS():
    """Run the Hf-isotope data reduction inside the iolite runtime.

    Uses the global ``drs``, ``data`` and ``IoLog`` objects provided by
    iolite: reads user settings, builds a mask, baseline-subtracts all
    input channels, computes interference-corrected Hf isotope ratios and
    writes them back as output time series.
    """
    drs.message("Starting Hf isotopes DRS...")
    drs.progress(0)
    # Get settings
    settings = drs.settings()
    print(settings)
    indexChannel = data.timeSeries(settings["IndexChannel"])
    maskChannel = data.timeSeries(settings["MaskChannel"])
    rmName = settings["ReferenceMaterial"]
    cutoff = settings["MaskCutoff"]
    trim = settings["MaskTrim"]
    HfTrue = settings["HfTrue"]
    Yb31 = settings["Yb31"]
    Yb63 = settings["Yb63"]
    age = settings["Age"]  # NOTE(review): read but not used below — confirm intent
    propErrors = settings["PropagateError"]
    # Create debug messages for the settings being used
    IoLog.debug("indexChannelName = %s" % indexChannel.name)
    IoLog.debug("maskChannelName = %s" % maskChannel.name)
    IoLog.debug("maskCutoff = %f" % cutoff)
    IoLog.debug("maskTrim = %f" % trim)
    # Setup index time
    drs.message("Setting up index time...")
    drs.progress(5)
    drs.setIndexChannel(indexChannel)
    # Setup the mask
    drs.message("Making mask...")
    drs.progress(10)
    mask = drs.createMaskFromCutoff(maskChannel, cutoff, trim)
    # Interp onto index time and baseline subtract
    drs.message("Interpolating onto index time and baseline subtracting...")
    drs.progress(25)
    allInputChannels = data.timeSeriesList(data.Input)
    for counter, channel in enumerate(allInputChannels):
        drs.message("Baseline subtracting %s" % channel.name)
        drs.progress(25 + 50*counter/len(allInputChannels))
        drs.baselineSubtract(data.selectionGroup("Baseline_1"), [allInputChannels[counter]], mask, 25, 75)
    # Baseline-subtracted count rates for each measured mass.
    HfLuYb176 = data.timeSeries("Hf176_CPS").data()
    Hf177 = data.timeSeries("Hf177_CPS").data()
    Hf178 = data.timeSeries("Hf178_CPS").data()
    Hf179 = data.timeSeries("Hf179_CPS").data()
    Yb171 = data.timeSeries("Yb171_CPS").data()
    Yb173 = data.timeSeries("Yb173_CPS").data()
    Lu175 = data.timeSeries("Lu175_CPS").data()
    # Mass-bias exponents from measured vs. accepted ratios and the
    # log of the isotope mass ratios (appears to be the exponential
    # mass-fractionation law — values below are isotope masses).
    HfFract = (np.log(HfTrue / (Hf179/Hf177))) / (np.log(178.946 / 176.943))*mask
    YbFract = (np.log(Yb31 /(Yb173/Yb171))) / (np.log(172.938222 / 170.936338))*mask
    # Isobaric interference of 176Yb and 176Lu on mass 176.
    Yb176 = Yb173 * (Yb63 / (np.power((175.942576 / 172.938222) , YbFract)))
    Lu176 = Lu175 * (0.02656 / (np.power((175.942694 / 174.9408) , YbFract)))
    Hf176c = HfLuYb176 - Yb176 - Lu176  # fully interference-corrected 176Hf
    LuHf176 = HfLuYb176 - Yb176         # Yb-corrected only
    YbHf176 = HfLuYb176 - Lu176         # Lu-corrected only
    Yb_PPM_on_176 = (Yb176 / HfLuYb176) * 1000000*mask
    Lu_PPM_on_176 = (Lu176 / HfLuYb176) * 1000000*mask
    # Mass-bias-corrected ratios (raw, fully corrected, and singly corrected).
    Hf176_177_Raw = (HfLuYb176 / Hf177) * np.power((175.941 / 176.943) , HfFract)*mask
    Hf176_177_Corr = (Hf176c/ Hf177) * np.power((175.941 / 176.943) , HfFract)*mask
    Hf176_177_LuCorr = (YbHf176 / Hf177) * np.power((175.941 / 176.943) , HfFract)*mask
    Hf176_177_YbCorr = (LuHf176 / Hf177) * np.power((175.941 / 176.943) , HfFract)*mask
    Hf178_177 = (Hf178 / Hf177) * np.power((177.944 / 176.943) , HfFract)*mask
    Lu176_Hf177_Raw = (Lu176 / Hf177)*mask
    # Parent/daughter ratios corrected with the mean of both fractionation factors.
    Lu176_Hf177_Corr = (Lu176 / Hf177) * np.power((175.942694 / 176.943), (0.5*(HfFract + YbFract)))*mask
    Yb176_Hf177_Raw = (Yb176 / Hf177)*mask
    Yb176_Hf177_Corr = (Yb176 / Hf177) * np.power((175.942576 / 176.943), (0.5*(HfFract + YbFract)))*mask
    TotalHfBeam = Hf178 / 0.27297  # total Hf from 178Hf and its natural abundance
    data.createTimeSeries("Hf176_177_Corr", data.Output, indexChannel.time(), Hf176_177_Corr)
    data.createTimeSeries("Hf178_177", data.Output, indexChannel.time(), Hf178_177)
    #Adding Lu176/177 ratio here
    data.createTimeSeries("Lu176_Hf177_Corr", data.Output, indexChannel.time(), Lu176_Hf177_Corr)
    # Normalize to the reference material: spline through the RM
    # measurements vs. its accepted 176Hf/177Hf value.
    StdSpline_Hf176_177 = data.spline(rmName, "Hf176_177_Corr").data()
    StdValue_Hf176_177 = data.referenceMaterialData(rmName)["176Hf/177Hf"].value()
    print("StdSpline_Hf176_177 mean = %f"%StdSpline_Hf176_177.mean())
    print("StdValue_Hf176_177 = %f"%StdValue_Hf176_177)
    StdCorr_Hf176_177 = (Hf176_177_Corr)* StdValue_Hf176_177 / StdCorr if False else (Hf176_177_Corr)* StdValue_Hf176_177 / StdSpline_Hf176_177
    data.createTimeSeries("StdCorr_Hf176_177", data.Output, indexChannel.time(), StdCorr_Hf176_177)
    #StdSpline_Hf178_177 = data.spline(rmName, "Hf178_177").data()
    #StdValue_Hf178_177 = data.referenceMaterialData(rmName)["178Hf/177Hf"].value()
    #StdCorr_Hf178_177= (Hf178_177)* StdValue_Hf178_177 / StdSpline_Hf178_177
    if propErrors:
        # Propagate errors for every non-baseline selection group.
        groups = [s for s in data.selectionGroupList() if s.type != data.Baseline]
        data.propagateErrors(groups, [data.timeSeries("StdCorr_Hf176_177")], data.timeSeries("Hf176_177_Corr"), rmName)
    drs.message("Finished!")
    drs.progress(100)
    drs.finished()
def settingsWidget():
    """
    This function puts together a user interface to configure the DRS.

    It registers default values for every setting, then builds one row
    per setting (combo boxes for channel/group choices, line edits for
    numeric values, a check box for error propagation).

    It is important to have the last line of this function call:
    drs.setSettingsWidget(widget)
    """
    widget = QtGui.QWidget()
    formLayout = QtGui.QFormLayout()
    widget.setLayout(formLayout)
    timeSeriesNames = data.timeSeriesNames(data.Input)
    defaultChannelName = ""
    if timeSeriesNames:
        defaultChannelName = timeSeriesNames[0]
    rmNames = data.selectionGroupNames(data.ReferenceMaterial)
    # Register defaults before building the widgets that read them back.
    drs.setSetting("IndexChannel", defaultChannelName)
    drs.setSetting("ReferenceMaterial", "Z_Plesovice")
    drs.setSetting("MaskChannel", defaultChannelName)
    drs.setSetting("MaskCutoff", 0.1)
    drs.setSetting("MaskTrim", 0.0)
    drs.setSetting("HfTrue", 0.7325)
    drs.setSetting("Yb31", 1.132685)
    drs.setSetting("Yb63", 0.796218)
    drs.setSetting("Age", 0)
    drs.setSetting("PropagateError", False)
    settings = drs.settings()

    def addComboBox(label, items, settingName):
        # One combo box per channel/group setting; writes back on change.
        box = QtGui.QComboBox(widget)
        box.addItems(items)
        box.setCurrentText(settings[settingName])
        box.currentTextChanged.connect(lambda t: drs.setSetting(settingName, t))
        formLayout.addRow(label, box)
        return box

    def addFloatLineEdit(label, settingName):
        # Fixed: QLineEdit.setText() requires a string, but the stored
        # settings are numbers (e.g. 0.1) — convert explicitly.
        edit = QtGui.QLineEdit(widget)
        edit.setText(str(settings[settingName]))
        edit.textChanged.connect(lambda t: drs.setSetting(settingName, float(t)))
        formLayout.addRow(label, edit)
        return edit

    addComboBox("Index channel", timeSeriesNames, "IndexChannel")
    addComboBox("Reference material", rmNames, "ReferenceMaterial")
    addComboBox("Mask channel", data.timeSeriesNames(data.Input), "MaskChannel")
    addFloatLineEdit("Mask cutoff", "MaskCutoff")
    addFloatLineEdit("Mask trim", "MaskTrim")
    addFloatLineEdit("Hf true", "HfTrue")
    addFloatLineEdit("173Yb/171Yb", "Yb31")
    addFloatLineEdit("176Yb/173Yb", "Yb63")
    addFloatLineEdit("Age", "Age")
    propCheckBox = QtGui.QCheckBox(widget)
    propCheckBox.setChecked(settings["PropagateError"])
    propCheckBox.toggled.connect(lambda t: drs.setSetting("PropagateError", bool(t)))
    formLayout.addRow("PropagateError", propCheckBox)
    drs.setSettingsWidget(widget)
| [
"iolite.QtGui.QWidget",
"numpy.power",
"iolite.QtGui.QFormLayout",
"numpy.log",
"iolite.QtGui.QCheckBox",
"iolite.QtGui.QComboBox",
"iolite.QtGui.QLineEdit"
] | [((4679, 4694), 'iolite.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (4692, 4694), False, 'from iolite import QtGui\n'), ((4709, 4728), 'iolite.QtGui.QFormLayout', 'QtGui.QFormLayout', ([], {}), '()\n', (4726, 4728), False, 'from iolite import QtGui\n'), ((5400, 5423), 'iolite.QtGui.QComboBox', 'QtGui.QComboBox', (['widget'], {}), '(widget)\n', (5415, 5423), False, 'from iolite import QtGui\n'), ((5674, 5697), 'iolite.QtGui.QComboBox', 'QtGui.QComboBox', (['widget'], {}), '(widget)\n', (5689, 5697), False, 'from iolite import QtGui\n'), ((5949, 5972), 'iolite.QtGui.QComboBox', 'QtGui.QComboBox', (['widget'], {}), '(widget)\n', (5964, 5972), False, 'from iolite import QtGui\n'), ((6235, 6258), 'iolite.QtGui.QLineEdit', 'QtGui.QLineEdit', (['widget'], {}), '(widget)\n', (6250, 6258), False, 'from iolite import QtGui\n'), ((6458, 6481), 'iolite.QtGui.QLineEdit', 'QtGui.QLineEdit', (['widget'], {}), '(widget)\n', (6473, 6481), False, 'from iolite import QtGui\n'), ((6685, 6708), 'iolite.QtGui.QLineEdit', 'QtGui.QLineEdit', (['widget'], {}), '(widget)\n', (6700, 6708), False, 'from iolite import QtGui\n'), ((6898, 6921), 'iolite.QtGui.QLineEdit', 'QtGui.QLineEdit', (['widget'], {}), '(widget)\n', (6913, 6921), False, 'from iolite import QtGui\n'), ((7105, 7128), 'iolite.QtGui.QLineEdit', 'QtGui.QLineEdit', (['widget'], {}), '(widget)\n', (7120, 7128), False, 'from iolite import QtGui\n'), ((7311, 7334), 'iolite.QtGui.QLineEdit', 'QtGui.QLineEdit', (['widget'], {}), '(widget)\n', (7326, 7334), False, 'from iolite import QtGui\n'), ((7505, 7528), 'iolite.QtGui.QCheckBox', 'QtGui.QCheckBox', (['widget'], {}), '(widget)\n', (7520, 7528), False, 'from iolite import QtGui\n'), ((1997, 2029), 'numpy.log', 'np.log', (['(HfTrue / (Hf179 / Hf177))'], {}), '(HfTrue / (Hf179 / Hf177))\n', (2003, 2029), True, 'import numpy as np\n'), ((2032, 2057), 'numpy.log', 'np.log', (['(178.946 / 176.943)'], {}), '(178.946 / 176.943)\n', (2038, 2057), True, 'import numpy as np\n'), 
((2076, 2106), 'numpy.log', 'np.log', (['(Yb31 / (Yb173 / Yb171))'], {}), '(Yb31 / (Yb173 / Yb171))\n', (2082, 2106), True, 'import numpy as np\n'), ((2108, 2139), 'numpy.log', 'np.log', (['(172.938222 / 170.936338)'], {}), '(172.938222 / 170.936338)\n', (2114, 2139), True, 'import numpy as np\n'), ((2172, 2214), 'numpy.power', 'np.power', (['(175.942576 / 172.938222)', 'YbFract'], {}), '(175.942576 / 172.938222, YbFract)\n', (2180, 2214), True, 'import numpy as np\n'), ((2249, 2289), 'numpy.power', 'np.power', (['(175.942694 / 174.9408)', 'YbFract'], {}), '(175.942694 / 174.9408, YbFract)\n', (2257, 2289), True, 'import numpy as np\n'), ((2532, 2568), 'numpy.power', 'np.power', (['(175.941 / 176.943)', 'HfFract'], {}), '(175.941 / 176.943, HfFract)\n', (2540, 2568), True, 'import numpy as np\n'), ((2613, 2649), 'numpy.power', 'np.power', (['(175.941 / 176.943)', 'HfFract'], {}), '(175.941 / 176.943, HfFract)\n', (2621, 2649), True, 'import numpy as np\n'), ((2698, 2734), 'numpy.power', 'np.power', (['(175.941 / 176.943)', 'HfFract'], {}), '(175.941 / 176.943, HfFract)\n', (2706, 2734), True, 'import numpy as np\n'), ((2783, 2819), 'numpy.power', 'np.power', (['(175.941 / 176.943)', 'HfFract'], {}), '(175.941 / 176.943, HfFract)\n', (2791, 2819), True, 'import numpy as np\n'), ((2859, 2895), 'numpy.power', 'np.power', (['(177.944 / 176.943)', 'HfFract'], {}), '(177.944 / 176.943, HfFract)\n', (2867, 2895), True, 'import numpy as np\n'), ((2982, 3039), 'numpy.power', 'np.power', (['(175.942694 / 176.943)', '(0.5 * (HfFract + YbFract))'], {}), '(175.942694 / 176.943, 0.5 * (HfFract + YbFract))\n', (2990, 3039), True, 'import numpy as np\n'), ((3125, 3182), 'numpy.power', 'np.power', (['(175.942576 / 176.943)', '(0.5 * (HfFract + YbFract))'], {}), '(175.942576 / 176.943, 0.5 * (HfFract + YbFract))\n', (3133, 3182), True, 'import numpy as np\n')] |
import json
import os
import sys
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
from stage1_active_pref_learning import process_cmd_line_args, save_selected_results, save_selected_results_allreps
# import matplotlib
# from matplotlib.ticker import MultipleLocator
# matplotlib.use("Agg")
#
# import matplotlib.pyplot as plt
#
# def plot_metrics_per_topic(chosen_metrics, all_results, figs, avg_figs, nqueriers, qidx, querier_type, n_inter_rounds, learner_type_str):
# for i, chosen_metric in enumerate(chosen_metrics): # these are the metrics we really care about
#
# results = np.mean(all_results[chosen_metric], axis=0)
# ntopics = len(results) if not np.isscalar(results) else 1
#
# colors = plt.rcParams['axes.prop_cycle'].by_key()[
# 'color'] # ['blue', 'red', 'green', 'yellow', 'orange', 'purple']
#
# plt.figure(figs[i].number)
# plt.bar(np.arange(ntopics) + (qidx / float(nqueriers + 1)), results, color=colors[qidx],
# width=1.0 / (nqueriers + 1.0), label=querier_type)
#
# plt.figure(avg_figs[i].number)
# plt.bar(qidx, np.mean(results), width=0.8, color=colors[qidx], label=querier_type, zorder=3)
# plt.title('Performance after %i interactions with %s' % (n_inter_rounds, learner_type_str))
#
# def save_plots(figs, avg_figs, timestamp, chosen_metrics):
# if not os.path.exists('./plots'):
# os.mkdir('./plots')
#
# plot_path = './plots/%s' % timestamp
# if not os.path.exists(plot_path):
# os.mkdir(plot_path)
# for m, f in enumerate(figs):
# plt.figure(f.number)
# plt.legend(loc='lower left')
# plt.savefig(os.path.join(plot_path, chosen_metrics[m] + '.pdf'))
#
# for m, f in enumerate(avg_figs):
# plt.figure(f.number)
# plt.legend(loc='best')
# plt.gca().yaxis.set_minor_locator(MultipleLocator(0.01))
# plt.grid(True, 'minor', zorder=0)
# plt.savefig(os.path.join(plot_path, chosen_metrics[m] + '_avg.pdf'))
# Post-processing entry point: locate the per-repetition result folders
# written by stage1_active_pref_learning and summarize the chosen metrics
# per querier across all completed repetitions.
if __name__ == '__main__':
    learner_type, learner_type_str, n_inter_rounds, output_folder_name, querier_types, root_dir, post_weight, reps, \
    seeds, n_debug, n_threads, dataset, _, _, _, _ = process_cmd_line_args(sys.argv)
    # get the output path of the last repetition of the experiment
    output_path = None
    rep_i = 0
    # Walk backwards (rep_i = -1, -2, ...) through the repetition indices
    # until a results directory that actually exists is found.
    # NOTE(review): if `reps` is a list and no directory exists at all,
    # reps[rep_i] raises IndexError before the `rep < 0` guard fires —
    # confirm this is acceptable for the expected inputs.
    while output_path is None or not os.path.exists(output_path):
        rep_i -= 1
        if np.isscalar(reps):
            rep = reps
        else:
            print('loading repeat idx %i in %s' % (rep_i, str(reps)))
            rep = reps[rep_i]
        if rep < 0:
            print('Could not find any results :(')
            sys.exit(0)
        output_folder_r = output_folder_name + '_rep%i' % rep
        output_path = root_dir + '/results/%s' % output_folder_r
        print('Checking %s' % output_path)
    # get a list of folders relating to this experiment
    # (entries keep their trailing newline; it is stripped when used below)
    folders = []
    with open('%s/folders.txt' % output_path, 'r') as fh:
        for folder_name in fh:
            folders.append(folder_name)
    nqueriers = len(querier_types)
    chosen_metrics = ['ndcg_at_1%', 'pcc', 'tau', 'ndcg_at_5%', 'ndcg_at_10%', 'rho', 'score_of_estimated_best']
    nreps = len(folders)  # number of repeats that were actually completed
    # Accumulators (querier x metric), presumably filled in place by
    # save_selected_results / save_selected_results_allreps.
    selected_means_allreps = np.zeros((nqueriers, len(chosen_metrics)))
    selected_vars_allreps = np.zeros((nqueriers, len(chosen_metrics)))
    for r in range(nreps):
        output_path = folders[r].strip('\n')
        # per-repetition accumulators, recreated for each repeat
        selected_means = np.zeros((nqueriers, len(chosen_metrics)))
        selected_vars = np.zeros((nqueriers, len(chosen_metrics)))
        for qidx, querier_type in enumerate(querier_types):
            filename = '%s/metrics_%s_%s_%i.json' % (output_path, querier_type, learner_type_str, n_inter_rounds)
            if not os.path.exists(filename):
                print('Cannot find the results file %s' % filename)
                continue
            else:
                print('Found the results file %s' % filename)
            with open(filename, 'r') as fh:
                all_result_dic = json.load(fh)
            save_selected_results(output_path, all_result_dic, selected_means, selected_vars, selected_means_allreps,
                                  selected_vars_allreps, chosen_metrics, querier_types, qidx)
    print('Saving result summary to %s' % output_path)
    save_selected_results_allreps(output_path, selected_means_allreps, selected_vars_allreps, chosen_metrics,
                                  querier_types, nreps)
| [
"logging.basicConfig",
"os.path.exists",
"stage1_active_pref_learning.save_selected_results_allreps",
"stage1_active_pref_learning.save_selected_results",
"numpy.isscalar",
"json.load",
"sys.exit",
"stage1_active_pref_learning.process_cmd_line_args"
] | [((67, 107), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (86, 107), False, 'import logging\n'), ((2255, 2286), 'stage1_active_pref_learning.process_cmd_line_args', 'process_cmd_line_args', (['sys.argv'], {}), '(sys.argv)\n', (2276, 2286), False, 'from stage1_active_pref_learning import process_cmd_line_args, save_selected_results, save_selected_results_allreps\n'), ((4448, 4579), 'stage1_active_pref_learning.save_selected_results_allreps', 'save_selected_results_allreps', (['output_path', 'selected_means_allreps', 'selected_vars_allreps', 'chosen_metrics', 'querier_types', 'nreps'], {}), '(output_path, selected_means_allreps,\n selected_vars_allreps, chosen_metrics, querier_types, nreps)\n', (4477, 4579), False, 'from stage1_active_pref_learning import process_cmd_line_args, save_selected_results, save_selected_results_allreps\n'), ((2488, 2505), 'numpy.isscalar', 'np.isscalar', (['reps'], {}), '(reps)\n', (2499, 2505), True, 'import numpy as np\n'), ((2429, 2456), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (2443, 2456), False, 'import os\n'), ((2728, 2739), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2736, 2739), False, 'import sys\n'), ((4192, 4365), 'stage1_active_pref_learning.save_selected_results', 'save_selected_results', (['output_path', 'all_result_dic', 'selected_means', 'selected_vars', 'selected_means_allreps', 'selected_vars_allreps', 'chosen_metrics', 'querier_types', 'qidx'], {}), '(output_path, all_result_dic, selected_means,\n selected_vars, selected_means_allreps, selected_vars_allreps,\n chosen_metrics, querier_types, qidx)\n', (4213, 4365), False, 'from stage1_active_pref_learning import process_cmd_line_args, save_selected_results, save_selected_results_allreps\n'), ((3888, 3912), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (3902, 3912), False, 'import os\n'), ((4165, 4178), 'json.load', 'json.load', (['fh'], 
{}), '(fh)\n', (4174, 4178), False, 'import json\n')] |
import os
import numpy as np
import gym
from gym.utils import seeding
from .cake_paddle import CakePaddle, RENDER_RATIO
from .manual_control import manual_control
from pettingzoo import AECEnv
from pettingzoo.utils import wrappers
from pettingzoo.utils.agent_selector import agent_selector
from pettingzoo.utils.to_parallel import parallel_wrapper_fn
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
import pygame
from gym.utils import EzPickle
# Side length of the square downsampling window used by the observation
# shape helpers below (1 means no downsampling).
KERNEL_WINDOW_LENGTH = 1
def get_image(path):
    """Load the image stored at ``path`` and return it as a pygame surface."""
    return pygame.image.load(path)
def deg_to_rad(deg):
    """Convert an angle from degrees to radians."""
    return np.radians(deg)
def get_flat_shape(width, height):
    """Length of one agent's flattened observation: half the screen, downsampled."""
    downsampled_pixels = width * height / (2 * KERNEL_WINDOW_LENGTH * KERNEL_WINDOW_LENGTH)
    return int(downsampled_pixels)
def original_obs_shape(screen_width, screen_height):
    """(rows, cols, channels) of one agent's observation — half the screen width, downsampled."""
    rows = int(screen_height / KERNEL_WINDOW_LENGTH)
    cols = int(screen_width / (2 * KERNEL_WINDOW_LENGTH))
    return (rows, cols, 1)
def get_valid_angle(randomizer):
    """Draw a random launch angle in [0, 2*pi) that is not too close to
    vertical or horizontal.

    Excluded bands (degrees): 90 +- 25, 270 +- 25, 180 +- 10, and
    0 +- 10 (i.e. (65, 115), (245, 295), (170, 190), (0, 10), (350, 360)).
    """
    ver_deg_range = 25
    hor_deg_range = 10
    vert1_lo, vert1_hi = deg_to_rad(90 - ver_deg_range), deg_to_rad(90 + ver_deg_range)
    vert2_lo, vert2_hi = deg_to_rad(270 - ver_deg_range), deg_to_rad(270 + ver_deg_range)
    horiz1_lo, horiz1_hi = deg_to_rad(180 - hor_deg_range), deg_to_rad(180 + hor_deg_range)
    horiz2_lo, horiz2_hi = deg_to_rad(360 - hor_deg_range), deg_to_rad(0 + hor_deg_range)

    def _excluded(theta):
        return (
            vert1_lo < theta < vert1_hi
            or vert2_lo < theta < vert2_hi
            or horiz1_lo < theta < horiz1_hi
            or theta > horiz2_lo
            or theta < horiz2_hi
        )

    # angle = 0 is itself excluded, so the loop always draws at least once.
    angle = 0
    while _excluded(angle):
        angle = 2 * np.pi * randomizer.rand()
    return angle
def get_small_random_value(randomizer):
    """Return a uniform random float in [0, 1/100)."""
    return randomizer.rand() * (1 / 100)
class PaddleSprite(pygame.sprite.Sprite):
    # A rectangular paddle that moves vertically inside the play area.
    def __init__(self, dims, speed):
        # dims: (width, height) of the paddle surface
        # speed: pixels moved per up/down action
        self.surf = pygame.Surface(dims)
        self.rect = self.surf.get_rect()
        self.speed = speed
    def reset(self):
        # No per-episode state for a plain paddle; kept for interface
        # parity with CakePaddle.
        pass
    def draw(self, screen):
        # Render the paddle as a solid white rectangle.
        pygame.draw.rect(screen, (255, 255, 255), self.rect)
    def update(self, area, action):
        # action: 0 - stay, 1 - up, 2 - down
        movepos = [0, 0]
        if action > 0:
            if action == 1:
                movepos[1] = movepos[1] - self.speed
            elif action == 2:
                movepos[1] = movepos[1] + self.speed
        # make sure the players stay inside the screen: the move is only
        # committed if the new rect is fully contained in the play area
        newpos = self.rect.move(movepos)
        if area.contains(newpos):
            self.rect = newpos
    def process_collision(self, b_rect, dx, dy, b_speed, paddle_type):
        '''
        Resolve a potential ball/paddle collision, bouncing the ball off
        the paddle face or its top/bottom edge.

        Parameters
        ----------
        b_rect : Ball rect
        dx, dy : Ball speed along single axis
        b_speed : Ball speed
        paddle_type : 1 for the left paddle, 2 for the right paddle
        Returns
        -------
        is_collision: 1 if ball collides with paddle
        b_rect: new ball rect
        b_speed: new ball speed
        '''
        # Branch order matters: the horizontal (face) bounce takes
        # priority over the top/bottom-edge bounce.
        if paddle_type == 1:
            if self.rect.colliderect(b_rect):
                is_collision = True
                if dx < 0:
                    # ball moving left hit the left paddle's face
                    b_rect.left = self.rect.right
                    b_speed[0] = -b_speed[0]
                # top or bottom edge
                elif dy > 0:
                    b_rect.bottom = self.rect.top
                    b_speed[1] = -b_speed[1]
                elif dy < 0:
                    b_rect.top = self.rect.bottom
                    b_speed[1] = -b_speed[1]
                return is_collision, b_rect, b_speed
        elif paddle_type == 2:
            if self.rect.colliderect(b_rect):
                is_collision = True
                if dx > 0:
                    # ball moving right hit the right paddle's face
                    b_rect.right = self.rect.left
                    b_speed[0] = -b_speed[0]
                # top or bottom edge
                elif dy > 0:
                    b_rect.bottom = self.rect.top
                    b_speed[1] = -b_speed[1]
                elif dy < 0:
                    b_rect.top = self.rect.bottom
                    b_speed[1] = -b_speed[1]
                return is_collision, b_rect, b_speed
        # no collision: ball state unchanged
        return False, b_rect, b_speed
class BallSprite(pygame.sprite.Sprite):
    # The ball: advances one axis at a time per frame, bounces off the
    # top/bottom walls and the paddles; leaving through a side wall ends
    # the episode.
    def __init__(self, randomizer, dims, speed, bounce_randomness=False):  # def __init__(self, image, speed):
        # self.surf = get_image(image)
        self.surf = pygame.Surface(dims)
        self.rect = self.surf.get_rect()
        self.speed_val = speed
        # initial direction: 45 degrees (equal x and y components)
        self.speed = [int(self.speed_val * np.cos(np.pi / 4)), int(self.speed_val * np.sin(np.pi / 4))]
        self.bounce_randomness = bounce_randomness
        self.done = False
        self.hit = False
        self.randomizer = randomizer
    def update2(self, area, p0, p1):
        """Advance the ball one frame; return True if it left the play area."""
        # NOTE(review): speed_x/speed_y are unpacked but never used.
        (speed_x, speed_y) = self.speed
        done_x, done_y = False, False
        # Move along each axis separately so collisions resolve per axis.
        if self.speed[0] != 0:
            done_x = self.move_single_axis(self.speed[0], 0, area, p0, p1)
        if self.speed[1] != 0:
            done_y = self.move_single_axis(0, self.speed[1], area, p0, p1)
        return (done_x or done_y)
    def move_single_axis(self, dx, dy, area, p0, p1):
        # returns done
        # move ball rect
        self.rect.x += dx
        self.rect.y += dy
        if not area.contains(self.rect):
            # bottom wall
            if dy > 0:
                self.rect.bottom = area.bottom
                self.speed[1] = -self.speed[1]
            # top wall
            elif dy < 0:
                self.rect.top = area.top
                self.speed[1] = -self.speed[1]
            # right or left walls: ball is out of play
            else:
                return True
                self.speed[0] = -self.speed[0]  # NOTE(review): unreachable after return
        else:
            # Do ball and bat collide?
            # add some randomness
            r_val = 0
            if self.bounce_randomness:
                r_val = get_small_random_value(self.randomizer)
            # ball in left half of screen: only the left paddle can be hit
            if self.rect.center[0] < area.center[0]:
                is_collision, self.rect, self.speed = p0.process_collision(self.rect, dx, dy, self.speed, 1)
                if is_collision:
                    # nudge the speed away from zero on both axes
                    self.speed = [self.speed[0] + np.sign(self.speed[0]) * r_val, self.speed[1] + np.sign(self.speed[1]) * r_val]
            # ball in right half
            else:
                is_collision, self.rect, self.speed = p1.process_collision(self.rect, dx, dy, self.speed, 2)
                if is_collision:
                    self.speed = [self.speed[0] + np.sign(self.speed[0]) * r_val, self.speed[1] + np.sign(self.speed[1]) * r_val]
        return False
    def draw(self, screen):
        # screen.blit(self.surf, self.rect)
        pygame.draw.rect(screen, (255, 255, 255), self.rect)
class CooperativePong(gym.Env):
    """Underlying two-paddle cooperative pong game: both agents share the
    screen and receive the same reward for keeping the ball in play."""
    metadata = {'render.modes': ['human', "rgb_array"]}
    def __init__(self, randomizer, ball_speed=9, left_paddle_speed=12, right_paddle_speed=12, cake_paddle=True, max_cycles=900, bounce_randomness=False):
        super(CooperativePong, self).__init__()
        pygame.init()
        self.num_agents = 2
        # Display screen (off-screen surface; a window is only opened by enable_render)
        self.s_width, self.s_height = 960 // RENDER_RATIO, 560 // RENDER_RATIO
        self.screen = pygame.Surface((self.s_width, self.s_height))  # (960, 720) # (640, 480) # (100, 200)
        self.area = self.screen.get_rect()
        # define action and observation spaces (3 actions: stay/up/down)
        self.action_space = [gym.spaces.Discrete(3) for _ in range(self.num_agents)]
        original_shape = original_obs_shape(self.s_width, self.s_height)
        original_color_shape = (original_shape[0], original_shape[1], 3)
        # self.observation_space = [gym.spaces.Box(low=0.0, high=1.0, shape=(original_shape), dtype=np.float32) for _ in range(self.num_agents)]
        self.observation_space = [gym.spaces.Box(low=0, high=255, shape=(original_color_shape), dtype=np.uint8) for _ in range(self.num_agents)]
        self.renderOn = False
        # set speed
        self.speed = [ball_speed, left_paddle_speed, right_paddle_speed]
        self.max_cycles = max_cycles
        # paddles: right paddle is the multi-tiered CakePaddle by default
        self.p0 = PaddleSprite((20 // RENDER_RATIO, 80 // RENDER_RATIO), left_paddle_speed)
        if cake_paddle:
            self.p1 = CakePaddle(right_paddle_speed)
        else:
            self.p1 = PaddleSprite((20 // RENDER_RATIO, 100 // RENDER_RATIO), right_paddle_speed)
        self.agents = ["paddle_0", "paddle_1"]  # list(range(self.num_agents))
        # ball
        self.ball = BallSprite(randomizer, (20 // RENDER_RATIO, 20 // RENDER_RATIO), ball_speed, bounce_randomness)
        self.randomizer = randomizer
        self.reinit()
    def reinit(self):
        # Reset per-episode bookkeeping shared with the AEC wrapper.
        self.rewards = dict(zip(self.agents, [0.0] * len(self.agents)))
        self.dones = dict(zip(self.agents, [False] * len(self.agents)))
        self.infos = dict(zip(self.agents, [{}] * len(self.agents)))
        self.score = 0
    def reset(self):
        # does not return observations
        # reset ball and paddle init conditions
        self.ball.rect.center = self.area.center
        # set the direction to an angle between [0, 2*np.pi)
        angle = get_valid_angle(self.randomizer)
        # angle = deg_to_rad(89)
        self.ball.speed = [int(self.ball.speed_val * np.cos(angle)), int(self.ball.speed_val * np.sin(angle))]
        self.p0.rect.midleft = self.area.midleft
        self.p1.rect.midright = self.area.midright
        self.p0.reset()
        self.p1.reset()
        self.p0.speed = self.speed[1]
        self.p1.speed = self.speed[2]
        self.done = False
        self.num_frames = 0
        self.reinit()
        self.draw()
    def close(self):
        # Tear down the display window if one was opened.
        if self.renderOn:
            pygame.event.pump()
            pygame.display.quit()
            self.renderOn = False
    def enable_render(self):
        # Replace the off-screen surface with a real display window.
        self.screen = pygame.display.set_mode(self.screen.get_size())
        self.renderOn = True
        self.draw()
    def render(self, mode='human'):
        if not self.renderOn and mode == "human":
            # sets self.renderOn to true and initializes display
            self.enable_render()
        observation = pygame.surfarray.pixels3d(self.screen)
        pygame.display.flip()
        # pygame's pixel array is (W, H, 3); transpose to (H, W, 3) for rgb_array
        return np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None
    def observe(self, agent):
        # Each agent sees its own half of the screen.
        observation = pygame.surfarray.pixels3d(self.screen)
        observation = np.rot90(observation, k=3)  # now the obs is laid out as H, W as rows and cols
        observation = np.fliplr(observation)  # laid out in the correct order
        if agent == self.agents[0]:
            return observation[:, :int(observation.shape[1] / 2), :]
        elif agent == self.agents[1]:
            return observation[:, int(observation.shape[1] / 2):, :]
    def draw(self):
        # draw background
        # pygame.display.get_surface().fill((0, 0, 0))
        pygame.draw.rect(self.screen, (0, 0, 0), self.area)
        # draw ball and paddles
        self.p0.draw(self.screen)
        self.p1.draw(self.screen)
        self.ball.draw(self.screen)
    def step(self, action, agent):
        '''
        Advance the game by one agent action.

        Does not return anything; rewards/dones/infos are updated on self
        for the AEC wrapper to read.
        '''
        # update p0, p1 accordingly
        # action: 0: do nothing,
        # action: 1: p[i] move up, 2: p[i] move down
        if agent == self.agents[0]:
            # first agent of the cycle also clears the per-cycle rewards
            self.rewards = {a: 0 for a in self.agents}
            self.p0.update(self.area, action)
        elif agent == self.agents[1]:
            self.p1.update(self.area, action)
        # do the rest if not done
        if not self.done:
            # update ball position
            self.done = self.ball.update2(self.area, self.p0, self.p1)
        # do the miscellaneous stuff after the last agent has moved
        # reward is the length of time ball is in play
        reward = 0
        # ball is out-of-bounds
        if self.done:
            reward = -100
            self.score += reward
        if not self.done:
            self.num_frames += 1
            # scaling reward so that the max reward is 100
            reward = 100 / self.max_cycles
            self.score += reward
            if self.num_frames == self.max_cycles:
                self.done = True
        # reward is split equally between the two cooperating agents
        for ag in self.agents:
            self.rewards[ag] = reward / self.num_agents
            self.dones[ag] = self.done
            self.infos[ag] = {}
        if self.renderOn:
            pygame.event.pump()
        self.draw()
def env(**kwargs):
    """Build the AEC cooperative pong environment wrapped with the
    standard PettingZoo guard wrappers (bounds check, NaN handling,
    call-order enforcement)."""
    wrapped = raw_env(**kwargs)
    wrapped = wrappers.AssertOutOfBoundsWrapper(wrapped)
    wrapped = wrappers.NanNoOpWrapper(wrapped, 0, "doing nothing")
    return wrappers.OrderEnforcingWrapper(wrapped)


parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv, EzPickle):
    """AEC (agent-environment-cycle) wrapper around CooperativePong:
    exposes the game through the PettingZoo AECEnv interface."""
    # class env(MultiAgentEnv):
    metadata = {'render.modes': ['human', "rgb_array"]}
    def __init__(self, **kwargs):
        EzPickle.__init__(self, **kwargs)
        # kwargs are kept so seed() can rebuild the underlying game
        self._kwargs = kwargs
        self.seed()
        self.agents = self.env.agents[:]
        self.possible_agents = self.agents[:]
        self._agent_selector = agent_selector(self.agents)
        self.agent_selection = self._agent_selector.reset()
        # spaces
        self.action_spaces = dict(zip(self.agents, self.env.action_space))
        self.observation_spaces = dict(zip(self.agents, self.env.observation_space))
        # dicts (shared by reference with the underlying game)
        self.observations = {}
        self.rewards = self.env.rewards
        self.dones = self.env.dones
        self.infos = self.env.infos
        self.score = self.env.score
        self.display_wait = 0.0
    # def convert_to_dict(self, list_of_list):
    #     return dict(zip(self.agents, list_of_list))
    def seed(self, seed=None):
        # Re-seeding recreates the whole underlying game with the new RNG.
        self.randomizer, seed = seeding.np_random(seed)
        self.env = CooperativePong(self.randomizer, **self._kwargs)
    def reset(self):
        self.env.reset()
        self.agents = self.possible_agents[:]
        self.agent_selection = self._agent_selector.reset()
        self.rewards = self.env.rewards
        self._cumulative_rewards = {a: 0 for a in self.agents}
        self.dones = self.env.dones
        self.infos = self.env.infos
    def observe(self, agent):
        obs = self.env.observe(agent)
        return obs
    def close(self):
        self.env.close()
    def render(self, mode='human'):
        return self.env.render(mode)
    def step(self, action):
        # Let AECEnv handle the dead-agent bookkeeping step.
        if self.dones[self.agent_selection]:
            return self._was_done_step(action)
        agent = self.agent_selection
        # NaN actions are treated as "do nothing" (action 0)
        if np.isnan(action):
            action = 0
        elif not self.action_spaces[agent].contains(action):
            raise Exception('Action for agent {} must be in Discrete({}).'
                            'It is currently {}'.format(agent, self.action_spaces[agent].n, action))
        self.env.step(action, agent)
        # select next agent and observe
        self.agent_selection = self._agent_selector.next()
        self.rewards = self.env.rewards
        self.dones = self.env.dones
        self.infos = self.env.infos
        self.score = self.env.score
        # standard AECEnv reward accounting
        self._cumulative_rewards[agent] = 0
        self._accumulate_rewards()
        self._dones_step_first()
# This was originally created, in full, by <NAME> in a different repo, and was
# added in by <NAME> (which is why he's shown as the creator in the git history)
| [
"pygame.init",
"pygame.display.quit",
"gym.utils.EzPickle.__init__",
"pygame.surfarray.pixels3d",
"numpy.rot90",
"pygame.event.pump",
"numpy.sin",
"gym.utils.seeding.np_random",
"pettingzoo.utils.wrappers.NanNoOpWrapper",
"pettingzoo.utils.wrappers.OrderEnforcingWrapper",
"pygame.display.flip",
... | [((12892, 12916), 'pettingzoo.utils.to_parallel.parallel_wrapper_fn', 'parallel_wrapper_fn', (['env'], {}), '(env)\n', (12911, 12916), False, 'from pettingzoo.utils.to_parallel import parallel_wrapper_fn\n'), ((507, 530), 'pygame.image.load', 'pygame.image.load', (['path'], {}), '(path)\n', (524, 530), False, 'import pygame\n'), ((12716, 12754), 'pettingzoo.utils.wrappers.AssertOutOfBoundsWrapper', 'wrappers.AssertOutOfBoundsWrapper', (['env'], {}), '(env)\n', (12749, 12754), False, 'from pettingzoo.utils import wrappers\n'), ((12765, 12813), 'pettingzoo.utils.wrappers.NanNoOpWrapper', 'wrappers.NanNoOpWrapper', (['env', '(0)', '"""doing nothing"""'], {}), "(env, 0, 'doing nothing')\n", (12788, 12813), False, 'from pettingzoo.utils import wrappers\n'), ((12824, 12859), 'pettingzoo.utils.wrappers.OrderEnforcingWrapper', 'wrappers.OrderEnforcingWrapper', (['env'], {}), '(env)\n', (12854, 12859), False, 'from pettingzoo.utils import wrappers\n'), ((1956, 1976), 'pygame.Surface', 'pygame.Surface', (['dims'], {}), '(dims)\n', (1970, 1976), False, 'import pygame\n'), ((2117, 2169), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', 'self.rect'], {}), '(screen, (255, 255, 255), self.rect)\n', (2133, 2169), False, 'import pygame\n'), ((4412, 4432), 'pygame.Surface', 'pygame.Surface', (['dims'], {}), '(dims)\n', (4426, 4432), False, 'import pygame\n'), ((6721, 6773), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', 'self.rect'], {}), '(screen, (255, 255, 255), self.rect)\n', (6737, 6773), False, 'import pygame\n'), ((7077, 7090), 'pygame.init', 'pygame.init', ([], {}), '()\n', (7088, 7090), False, 'import pygame\n'), ((7246, 7291), 'pygame.Surface', 'pygame.Surface', (['(self.s_width, self.s_height)'], {}), '((self.s_width, self.s_height))\n', (7260, 7291), False, 'import pygame\n'), ((10181, 10219), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['self.screen'], {}), '(self.screen)\n', (10206, 10219), False, 
'import pygame\n'), ((10228, 10249), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (10247, 10249), False, 'import pygame\n'), ((10393, 10431), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['self.screen'], {}), '(self.screen)\n', (10418, 10431), False, 'import pygame\n'), ((10454, 10480), 'numpy.rot90', 'np.rot90', (['observation'], {'k': '(3)'}), '(observation, k=3)\n', (10462, 10480), True, 'import numpy as np\n'), ((10555, 10577), 'numpy.fliplr', 'np.fliplr', (['observation'], {}), '(observation)\n', (10564, 10577), True, 'import numpy as np\n'), ((10933, 10984), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', '(0, 0, 0)', 'self.area'], {}), '(self.screen, (0, 0, 0), self.area)\n', (10949, 10984), False, 'import pygame\n'), ((13083, 13116), 'gym.utils.EzPickle.__init__', 'EzPickle.__init__', (['self'], {}), '(self, **kwargs)\n', (13100, 13116), False, 'from gym.utils import EzPickle\n'), ((13287, 13314), 'pettingzoo.utils.agent_selector.agent_selector', 'agent_selector', (['self.agents'], {}), '(self.agents)\n', (13301, 13314), False, 'from pettingzoo.utils.agent_selector import agent_selector\n'), ((13945, 13968), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (13962, 13968), False, 'from gym.utils import seeding\n'), ((14743, 14759), 'numpy.isnan', 'np.isnan', (['action'], {}), '(action)\n', (14751, 14759), True, 'import numpy as np\n'), ((7452, 7474), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(3)'], {}), '(3)\n', (7471, 7474), False, 'import gym\n'), ((7833, 7908), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'original_color_shape', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=original_color_shape, dtype=np.uint8)\n', (7847, 7908), False, 'import gym\n'), ((9736, 9755), 'pygame.event.pump', 'pygame.event.pump', ([], {}), '()\n', (9753, 9755), False, 'import pygame\n'), ((9768, 9789), 'pygame.display.quit', 'pygame.display.quit', ([], {}), 
'()\n', (9787, 9789), False, 'import pygame\n'), ((10265, 10306), 'numpy.transpose', 'np.transpose', (['observation'], {'axes': '(1, 0, 2)'}), '(observation, axes=(1, 0, 2))\n', (10277, 10306), True, 'import numpy as np\n'), ((12617, 12636), 'pygame.event.pump', 'pygame.event.pump', ([], {}), '()\n', (12634, 12636), False, 'import pygame\n'), ((4548, 4565), 'numpy.cos', 'np.cos', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (4554, 4565), True, 'import numpy as np\n'), ((4589, 4606), 'numpy.sin', 'np.sin', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (4595, 4606), True, 'import numpy as np\n'), ((9293, 9306), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9299, 9306), True, 'import numpy as np\n'), ((9335, 9348), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (9341, 9348), True, 'import numpy as np\n'), ((6215, 6237), 'numpy.sign', 'np.sign', (['self.speed[0]'], {}), '(self.speed[0])\n', (6222, 6237), True, 'import numpy as np\n'), ((6263, 6285), 'numpy.sign', 'np.sign', (['self.speed[1]'], {}), '(self.speed[1])\n', (6270, 6285), True, 'import numpy as np\n'), ((6538, 6560), 'numpy.sign', 'np.sign', (['self.speed[0]'], {}), '(self.speed[0])\n', (6545, 6560), True, 'import numpy as np\n'), ((6586, 6608), 'numpy.sign', 'np.sign', (['self.speed[1]'], {}), '(self.speed[1])\n', (6593, 6608), True, 'import numpy as np\n')] |
"""Predict with the most-common-label algorithm."""
import argparse
import logging
from pathlib import Path
import muspy
import numpy as np
import tqdm
from arranger.utils import (
load_config,
reconstruct_tracks,
save_sample_flat,
setup_loggers,
)
# Load the shared arranger configuration (per-dataset "programs", "has_drums",
# and the global "colors" palette are read from it below).
CONFIG = load_config()
def parse_arguments():
    """Build and run the command-line parser for the prediction script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", type=Path, required=True,
                        help="input filename or directory")
    parser.add_argument("-o", "--output_dir", type=Path, required=True,
                        help="output directory")
    parser.add_argument("-m", "--model_filename", type=Path, required=True,
                        help="model filename")
    parser.add_argument("-d", "--dataset", required=True,
                        choices=("bach", "musicnet", "nes", "lmd"),
                        help="dataset key")
    parser.add_argument("-a", "--audio", action="store_true",
                        help="whether to write audio")
    parser.add_argument("-s", "--suffix", default="pred",
                        help="suffix to the output filename(s)")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="reduce output verbosity")
    return parser.parse_args()
def predict(music, model_filename):
    """Assign every note the single most common label learnt at training time.

    Returns a pair ``(notes, predictions)`` where ``notes`` is an int array of
    ``(time, pitch, duration, velocity)`` rows sorted by time, and
    ``predictions`` is one label per note.
    """
    # Gather note tuples from every non-drum, non-empty track.
    gathered = []
    for track in music.tracks:
        if track.is_drum or not track.notes:
            continue
        gathered.extend(
            (note.time, note.pitch, note.duration, note.velocity)
            for note in track.notes
        )
    # Sort chronologically, then convert to an array for speed.
    gathered.sort()
    notes = np.array(gathered, int)
    # The "model" is just the most common label stored as a text file.
    most_common_label = np.loadtxt(model_filename)
    # Every note receives the same predicted label.
    predictions = np.full(len(notes), most_common_label)
    return notes, predictions
def process(filename, args):
    """Predict labels for one file, rebuild the tracks and save the results."""
    music = muspy.load(filename)
    notes, predictions = predict(music, args.model_filename)
    # Per-dataset instrument programs and the shared color palette.
    programs = CONFIG[args.dataset]["programs"]
    colors = CONFIG["colors"]
    # Rebuild a copy of the music whose tracks follow the predicted labels.
    result = music.deepcopy()
    result.tracks = reconstruct_tracks(notes, predictions, programs)
    save_sample_flat(
        result, args.output_dir, f"{filename.stem}_{args.suffix}", colors)
    if args.audio:
        muspy.write_audio(
            args.output_dir / f"{filename.stem}_{args.suffix}.wav", result)
    # Datasets with drums get a second copy that re-attaches the drum track.
    if CONFIG[args.dataset]["has_drums"]:
        result.tracks.append(music.tracks[-1])  # append drum track
        save_sample_flat(
            result,
            args.output_dir,
            f"{filename.stem}_{args.suffix}_drums",
            colors,
        )
        if args.audio:
            muspy.write_audio(
                args.output_dir / f"{filename.stem}_{args.suffix}_drums.wav",
                result,
            )
    return notes, predictions
def main():
    """Entry point: validate arguments, set up logging and run inference."""
    args = parse_arguments()
    # The output directory must already exist.
    if args.output_dir is not None and not args.output_dir.is_dir():
        raise NotADirectoryError("`output_dir` must be an existing directory.")
    # Log both to the console and to a file named after this script.
    setup_loggers(
        filename=args.output_dir / Path(__file__).with_suffix(".log").name,
        quiet=args.quiet,
    )
    logging.debug("Running with command-line arguments :")
    for name, value in vars(args).items():
        logging.debug(f"- {name} : {value}")
    # A single input file is processed directly.
    if args.input.is_file():
        process(args.input, args)
        return
    # Otherwise treat the input as a directory of JSON files.
    logging.info("Collecting filenames...")
    filenames = list(args.input.glob("*.json"))
    assert filenames, "No input files found. Only JSON files are supported."
    logging.info("Start testing...")
    for path in tqdm.tqdm(filenames, disable=args.quiet, ncols=80):
        process(path, args)
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
| [
"arranger.utils.load_config",
"muspy.load",
"muspy.write_audio",
"arranger.utils.save_sample_flat",
"logging.debug",
"argparse.ArgumentParser",
"pathlib.Path",
"tqdm.tqdm",
"arranger.utils.reconstruct_tracks",
"numpy.array",
"numpy.loadtxt",
"logging.info"
] | [((298, 311), 'arranger.utils.load_config', 'load_config', ([], {}), '()\n', (309, 311), False, 'from arranger.utils import load_config, reconstruct_tracks, save_sample_flat, setup_loggers\n'), ((390, 415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (413, 415), False, 'import argparse\n'), ((1913, 1933), 'numpy.array', 'np.array', (['notes', 'int'], {}), '(notes, int)\n', (1921, 1933), True, 'import numpy as np\n'), ((1999, 2025), 'numpy.loadtxt', 'np.loadtxt', (['model_filename'], {}), '(model_filename)\n', (2009, 2025), True, 'import numpy as np\n'), ((2279, 2299), 'muspy.load', 'muspy.load', (['filename'], {}), '(filename)\n', (2289, 2299), False, 'import muspy\n'), ((2617, 2665), 'arranger.utils.reconstruct_tracks', 'reconstruct_tracks', (['notes', 'predictions', 'programs'], {}), '(notes, predictions, programs)\n', (2635, 2665), False, 'from arranger.utils import load_config, reconstruct_tracks, save_sample_flat, setup_loggers\n'), ((2670, 2761), 'arranger.utils.save_sample_flat', 'save_sample_flat', (['music_pred', 'args.output_dir', 'f"""{filename.stem}_{args.suffix}"""', 'colors'], {}), "(music_pred, args.output_dir,\n f'{filename.stem}_{args.suffix}', colors)\n", (2686, 2761), False, 'from arranger.utils import load_config, reconstruct_tracks, save_sample_flat, setup_loggers\n'), ((3891, 3945), 'logging.debug', 'logging.debug', (['"""Running with command-line arguments :"""'], {}), "('Running with command-line arguments :')\n", (3904, 3945), False, 'import logging\n'), ((4163, 4202), 'logging.info', 'logging.info', (['"""Collecting filenames..."""'], {}), "('Collecting filenames...')\n", (4175, 4202), False, 'import logging\n'), ((4355, 4387), 'logging.info', 'logging.info', (['"""Start testing..."""'], {}), "('Start testing...')\n", (4367, 4387), False, 'import logging\n'), ((4408, 4458), 'tqdm.tqdm', 'tqdm.tqdm', (['filenames'], {'disable': 'args.quiet', 'ncols': '(80)'}), '(filenames, disable=args.quiet, ncols=80)\n', 
(4417, 4458), False, 'import tqdm\n'), ((2799, 2888), 'muspy.write_audio', 'muspy.write_audio', (["(args.output_dir / f'{filename.stem}_{args.suffix}.wav')", 'music_pred'], {}), "(args.output_dir / f'{filename.stem}_{args.suffix}.wav',\n music_pred)\n", (2816, 2888), False, 'import muspy\n'), ((3064, 3161), 'arranger.utils.save_sample_flat', 'save_sample_flat', (['music_pred', 'args.output_dir', 'f"""{filename.stem}_{args.suffix}_drums"""', 'colors'], {}), "(music_pred, args.output_dir,\n f'{filename.stem}_{args.suffix}_drums', colors)\n", (3080, 3161), False, 'from arranger.utils import load_config, reconstruct_tracks, save_sample_flat, setup_loggers\n'), ((3996, 4031), 'logging.debug', 'logging.debug', (['f"""- {arg} : {value}"""'], {}), "(f'- {arg} : {value}')\n", (4009, 4031), False, 'import logging\n'), ((3252, 3347), 'muspy.write_audio', 'muspy.write_audio', (["(args.output_dir / f'{filename.stem}_{args.suffix}_drums.wav')", 'music_pred'], {}), "(args.output_dir /\n f'{filename.stem}_{args.suffix}_drums.wav', music_pred)\n", (3269, 3347), False, 'import muspy\n'), ((3780, 3794), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3784, 3794), False, 'from pathlib import Path\n')] |
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import paddle
import paddle.nn as nn
from paddle.nn.initializer import TruncatedNormal, Constant
from ppcls.arch.backbone.base.theseus_layer import Identity
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
# Pretrained checkpoint URLs, keyed by architecture name.
MODEL_URLS = {
    "TNT_small":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams"
}
# NOTE(review): this exports a dict *view*, not a list — confirm intended.
__all__ = MODEL_URLS.keys()
# Shared weight initializers used throughout the model.
trunc_normal_ = TruncatedNormal(std=.02)
zeros_ = Constant(value=0.)
ones_ = Constant(value=1.)
def drop_path(x, drop_prob=0., training=False):
    """Randomly zero whole samples (stochastic depth), rescaling survivors.

    Acts as the identity when `drop_prob` is 0 or at inference time. The
    original name "Drop Connect" is misleading — see
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = paddle.to_tensor(1 - drop_prob)
    # One Bernoulli draw per sample: mask of shape (N, 1, ..., 1).
    mask_shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
    mask = paddle.floor(
        paddle.add(keep_prob, paddle.rand(mask_shape, dtype=x.dtype)))
    # Scale survivors by 1/keep_prob so the expected value is unchanged.
    return x.divide(keep_prob) * mask
class DropPath(nn.Layer):
    """Layer wrapper around :func:`drop_path` (per-sample stochastic depth)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Active only while training; identity at inference.
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Layer):
    """Feed-forward block: linear -> activation -> dropout -> linear -> dropout."""

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 act_layer=nn.GELU,
                 drop=0.):
        super().__init__()
        # Hidden/output widths default to the input width.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # The same dropout module is applied after both projections.
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Layer):
    """Multi-head self-attention with reduced-width query/key projections.

    Queries and keys are projected to `hidden_dim` (split across heads)
    while values keep the full input width `dim`.
    """
    def __init__(self,
                 dim,
                 hidden_dim,
                 num_heads=8,
                 qkv_bias=False,
                 attn_drop=0.,
                 proj_drop=0.):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        head_dim = hidden_dim // num_heads
        self.head_dim = head_dim
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scale = head_dim**-0.5
        # One projection produces both q and k; v is projected separately.
        self.qk = nn.Linear(dim, hidden_dim * 2, bias_attr=qkv_bias)
        self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 2, heads, head_dim) -> (2, B, heads, N, head_dim)
        qk = self.qk(x).reshape(
            (B, N, 2, self.num_heads, self.head_dim)).transpose(
                (2, 0, 3, 1, 4))
        q, k = qk[0], qk[1]
        # Values keep the input width, split evenly across heads.
        v = self.v(x).reshape(
            (B, N, self.num_heads, x.shape[-1] // self.num_heads)).transpose(
                (0, 2, 1, 3))
        attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale
        attn = nn.functional.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x = paddle.matmul(attn, v)
        # Merge heads back: (B, heads, N, v_head_dim) -> (B, N, dim).
        # Note: x.shape here still refers to the pre-transpose tensor.
        x = x.transpose((0, 2, 1, 3)).reshape(
            (B, N, x.shape[-1] * x.shape[-3]))
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Layer):
    """TNT block: an inner transformer over pixel tokens plus an outer
    transformer over patch tokens, linked by a linear projection that folds
    each patch's pixel tokens into its patch embedding.
    """
    def __init__(self,
                 dim,
                 in_dim,
                 num_pixel,
                 num_heads=12,
                 in_num_head=4,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm):
        super().__init__()
        # Inner transformer
        self.norm_in = norm_layer(in_dim)
        self.attn_in = Attention(
            in_dim,
            in_dim,
            num_heads=in_num_head,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop)
        self.norm_mlp_in = norm_layer(in_dim)
        self.mlp_in = Mlp(in_features=in_dim,
                          hidden_features=int(in_dim * 4),
                          out_features=in_dim,
                          act_layer=act_layer,
                          drop=drop)
        # Projects each patch's flattened pixel tokens into the outer dim.
        self.norm1_proj = norm_layer(in_dim)
        self.proj = nn.Linear(in_dim * num_pixel, dim)
        # Outer transformer
        self.norm_out = norm_layer(dim)
        self.attn_out = Attention(
            dim,
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop)
        # Stochastic depth on every residual branch (identity when rate is 0).
        self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
        self.norm_mlp = norm_layer(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       out_features=dim,
                       act_layer=act_layer,
                       drop=drop)
    def forward(self, pixel_embed, patch_embed):
        # inner
        pixel_embed = paddle.add(
            pixel_embed,
            self.drop_path(self.attn_in(self.norm_in(pixel_embed))))
        pixel_embed = paddle.add(
            pixel_embed,
            self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))))
        # outer
        B, N, C = patch_embed.shape
        norm1_proj = self.norm1_proj(pixel_embed)
        # Flatten each patch's pixel tokens: (B*(N-1), P, D) -> (B, N-1, P*D).
        norm1_proj = norm1_proj.reshape(
            (B, N - 1, norm1_proj.shape[1] * norm1_proj.shape[2]))
        # Inject pixel info into every patch token except the class token
        # (in-place slice assignment on the patch embeddings).
        patch_embed[:, 1:] = paddle.add(patch_embed[:, 1:],
                                        self.proj(norm1_proj))
        patch_embed = paddle.add(
            patch_embed,
            self.drop_path(self.attn_out(self.norm_out(patch_embed))))
        patch_embed = paddle.add(
            patch_embed, self.drop_path(self.mlp(self.norm_mlp(patch_embed))))
        return pixel_embed, patch_embed
class PixelEmbed(nn.Layer):
    """Convolutional stem producing per-patch pixel-level token embeddings."""
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 in_dim=48,
                 stride=4):
        super().__init__()
        # Number of non-overlapping patches over the whole (square) image.
        num_patches = (img_size // patch_size)**2
        self.img_size = img_size
        self.num_patches = num_patches
        self.in_dim = in_dim
        # Patch side length after the strided convolution below.
        new_patch_size = math.ceil(patch_size / stride)
        self.new_patch_size = new_patch_size
        self.proj = nn.Conv2D(
            in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
    def forward(self, x, pixel_pos):
        B, C, H, W = x.shape
        assert H == self.img_size and W == self.img_size, f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})."
        x = self.proj(x)
        # Cut the feature map into patch-sized windows (one column per patch).
        x = nn.functional.unfold(x, self.new_patch_size, self.new_patch_size)
        x = x.transpose((0, 2, 1)).reshape(
            (-1, self.in_dim, self.new_patch_size, self.new_patch_size))
        # Add the learnt pixel position embedding, then flatten each patch
        # to a sequence of pixel tokens: (B*num_patches, P*P, in_dim).
        x = x + pixel_pos
        x = x.reshape((-1, self.in_dim, self.new_patch_size *
                       self.new_patch_size)).transpose((0, 2, 1))
        return x
class TNT(nn.Layer):
    """Transformer-in-Transformer backbone (pixel-level inner tokens inside
    patch-level outer tokens), as used for ImageNet classification."""
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 embed_dim=768,
                 in_dim=48,
                 depth=12,
                 num_heads=12,
                 in_num_head=4,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 first_stride=4,
                 class_num=1000):
        super().__init__()
        self.class_num = class_num
        # num_features for consistency with other models
        self.num_features = self.embed_dim = embed_dim
        self.pixel_embed = PixelEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            in_dim=in_dim,
            stride=first_stride)
        num_patches = self.pixel_embed.num_patches
        self.num_patches = num_patches
        new_patch_size = self.pixel_embed.new_patch_size
        num_pixel = new_patch_size**2
        # Projection that folds a patch's pixel tokens into one patch token.
        self.norm1_proj = norm_layer(num_pixel * in_dim)
        self.proj = nn.Linear(num_pixel * in_dim, embed_dim)
        self.norm2_proj = norm_layer(embed_dim)
        # Learnt class token and position embeddings (zero-initialized here,
        # truncated-normal re-initialized below).
        self.cls_token = self.create_parameter(
            shape=(1, 1, embed_dim), default_initializer=zeros_)
        self.add_parameter("cls_token", self.cls_token)
        self.patch_pos = self.create_parameter(
            shape=(1, num_patches + 1, embed_dim), default_initializer=zeros_)
        self.add_parameter("patch_pos", self.patch_pos)
        self.pixel_pos = self.create_parameter(
            shape=(1, in_dim, new_patch_size, new_patch_size),
            default_initializer=zeros_)
        self.add_parameter("pixel_pos", self.pixel_pos)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth decay rule
        dpr = np.linspace(0, drop_path_rate, depth)
        blocks = []
        for i in range(depth):
            blocks.append(
                Block(
                    dim=embed_dim,
                    in_dim=in_dim,
                    num_pixel=num_pixel,
                    num_heads=num_heads,
                    in_num_head=in_num_head,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer))
        self.blocks = nn.LayerList(blocks)
        self.norm = norm_layer(embed_dim)
        # Classification head is only created for class_num > 0.
        if class_num > 0:
            self.head = nn.Linear(embed_dim, class_num)
        trunc_normal_(self.cls_token)
        trunc_normal_(self.patch_pos)
        trunc_normal_(self.pixel_pos)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Linear layers: truncated-normal weights, zero bias.
        # LayerNorm: zero bias, unit weight.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)
    def forward_features(self, x):
        """Return the class-token feature vector for input images `x`."""
        B = paddle.shape(x)[0]
        pixel_embed = self.pixel_embed(x, self.pixel_pos)
        # Fold each patch's pixel tokens into one patch embedding.
        patch_embed = self.norm2_proj(
            self.proj(
                self.norm1_proj(
                    pixel_embed.reshape((-1, self.num_patches, pixel_embed.
                                         shape[-1] * pixel_embed.shape[-2])))))
        # Prepend the class token and add patch position embeddings.
        patch_embed = paddle.concat(
            (self.cls_token.expand((B, -1, -1)), patch_embed), axis=1)
        patch_embed = patch_embed + self.patch_pos
        patch_embed = self.pos_drop(patch_embed)
        for blk in self.blocks:
            pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
        patch_embed = self.norm(patch_embed)
        return patch_embed[:, 0]
    def forward(self, x):
        x = self.forward_features(x)
        if self.class_num > 0:
            x = self.head(x)
        return x
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    """Load weights into `model` according to `pretrained`:

    ``False`` -> leave the model as-is, ``True`` -> download from
    `model_url`, ``str`` -> load from a local checkpoint path.
    """
    if pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    elif pretrained is not False:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )
def TNT_small(pretrained=False, **kwargs):
    """Build the small TNT variant (384-dim patch tokens, 24-dim pixel tokens)."""
    model = TNT(
        patch_size=16,
        embed_dim=384,
        in_dim=24,
        depth=12,
        num_heads=6,
        in_num_head=4,
        qkv_bias=False,
        **kwargs)
    # Optionally load ImageNet-pretrained weights.
    _load_pretrained(pretrained, model, MODEL_URLS["TNT_small"])
    return model
| [
"paddle.rand",
"paddle.nn.Dropout",
"paddle.matmul",
"math.ceil",
"paddle.nn.Conv2D",
"paddle.shape",
"paddle.nn.LayerList",
"ppcls.utils.save_load.load_dygraph_pretrain",
"numpy.linspace",
"paddle.nn.initializer.TruncatedNormal",
"paddle.floor",
"paddle.to_tensor",
"paddle.nn.Linear",
"pa... | [((1063, 1088), 'paddle.nn.initializer.TruncatedNormal', 'TruncatedNormal', ([], {'std': '(0.02)'}), '(std=0.02)\n', (1078, 1088), False, 'from paddle.nn.initializer import TruncatedNormal, Constant\n'), ((1097, 1116), 'paddle.nn.initializer.Constant', 'Constant', ([], {'value': '(0.0)'}), '(value=0.0)\n', (1105, 1116), False, 'from paddle.nn.initializer import TruncatedNormal, Constant\n'), ((1124, 1143), 'paddle.nn.initializer.Constant', 'Constant', ([], {'value': '(1.0)'}), '(value=1.0)\n', (1132, 1143), False, 'from paddle.nn.initializer import TruncatedNormal, Constant\n'), ((1570, 1601), 'paddle.to_tensor', 'paddle.to_tensor', (['(1 - drop_prob)'], {}), '(1 - drop_prob)\n', (1586, 1601), False, 'import paddle\n'), ((1757, 1784), 'paddle.floor', 'paddle.floor', (['random_tensor'], {}), '(random_tensor)\n', (1769, 1784), False, 'import paddle\n'), ((1702, 1735), 'paddle.rand', 'paddle.rand', (['shape'], {'dtype': 'x.dtype'}), '(shape, dtype=x.dtype)\n', (1713, 1735), False, 'import paddle\n'), ((2568, 2607), 'paddle.nn.Linear', 'nn.Linear', (['in_features', 'hidden_features'], {}), '(in_features, hidden_features)\n', (2577, 2607), True, 'import paddle.nn as nn\n'), ((2658, 2698), 'paddle.nn.Linear', 'nn.Linear', (['hidden_features', 'out_features'], {}), '(hidden_features, out_features)\n', (2667, 2698), True, 'import paddle.nn as nn\n'), ((2719, 2735), 'paddle.nn.Dropout', 'nn.Dropout', (['drop'], {}), '(drop)\n', (2729, 2735), True, 'import paddle.nn as nn\n'), ((3361, 3411), 'paddle.nn.Linear', 'nn.Linear', (['dim', '(hidden_dim * 2)'], {'bias_attr': 'qkv_bias'}), '(dim, hidden_dim * 2, bias_attr=qkv_bias)\n', (3370, 3411), True, 'import paddle.nn as nn\n'), ((3429, 3468), 'paddle.nn.Linear', 'nn.Linear', (['dim', 'dim'], {'bias_attr': 'qkv_bias'}), '(dim, dim, bias_attr=qkv_bias)\n', (3438, 3468), True, 'import paddle.nn as nn\n'), ((3494, 3515), 'paddle.nn.Dropout', 'nn.Dropout', (['attn_drop'], {}), '(attn_drop)\n', (3504, 3515), True, 'import 
paddle.nn as nn\n'), ((3536, 3555), 'paddle.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (3545, 3555), True, 'import paddle.nn as nn\n'), ((3581, 3602), 'paddle.nn.Dropout', 'nn.Dropout', (['proj_drop'], {}), '(proj_drop)\n', (3591, 3602), True, 'import paddle.nn as nn\n'), ((4043, 4079), 'paddle.nn.functional.softmax', 'nn.functional.softmax', (['attn'], {'axis': '(-1)'}), '(attn, axis=-1)\n', (4064, 4079), True, 'import paddle.nn as nn\n'), ((4129, 4151), 'paddle.matmul', 'paddle.matmul', (['attn', 'v'], {}), '(attn, v)\n', (4142, 4151), False, 'import paddle\n'), ((5382, 5416), 'paddle.nn.Linear', 'nn.Linear', (['(in_dim * num_pixel)', 'dim'], {}), '(in_dim * num_pixel, dim)\n', (5391, 5416), True, 'import paddle.nn as nn\n'), ((7365, 7395), 'math.ceil', 'math.ceil', (['(patch_size / stride)'], {}), '(patch_size / stride)\n', (7374, 7395), False, 'import math\n'), ((7462, 7535), 'paddle.nn.Conv2D', 'nn.Conv2D', (['in_chans', 'self.in_dim'], {'kernel_size': '(7)', 'padding': '(3)', 'stride': 'stride'}), '(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)\n', (7471, 7535), True, 'import paddle.nn as nn\n'), ((7797, 7862), 'paddle.nn.functional.unfold', 'nn.functional.unfold', (['x', 'self.new_patch_size', 'self.new_patch_size'], {}), '(x, self.new_patch_size, self.new_patch_size)\n', (7817, 7862), True, 'import paddle.nn as nn\n'), ((9349, 9389), 'paddle.nn.Linear', 'nn.Linear', (['(num_pixel * in_dim)', 'embed_dim'], {}), '(num_pixel * in_dim, embed_dim)\n', (9358, 9389), True, 'import paddle.nn as nn\n'), ((10025, 10048), 'paddle.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_rate'}), '(p=drop_rate)\n', (10035, 10048), True, 'import paddle.nn as nn\n'), ((10102, 10139), 'numpy.linspace', 'np.linspace', (['(0)', 'drop_path_rate', 'depth'], {}), '(0, drop_path_rate, depth)\n', (10113, 10139), True, 'import numpy as np\n'), ((10705, 10725), 'paddle.nn.LayerList', 'nn.LayerList', (['blocks'], {}), '(blocks)\n', (10717, 10725), True, 
'import paddle.nn as nn\n'), ((5748, 5758), 'ppcls.arch.backbone.base.theseus_layer.Identity', 'Identity', ([], {}), '()\n', (5756, 5758), False, 'from ppcls.arch.backbone.base.theseus_layer import Identity\n'), ((10819, 10850), 'paddle.nn.Linear', 'nn.Linear', (['embed_dim', 'class_num'], {}), '(embed_dim, class_num)\n', (10828, 10850), True, 'import paddle.nn as nn\n'), ((11351, 11366), 'paddle.shape', 'paddle.shape', (['x'], {}), '(x)\n', (11363, 11366), False, 'import paddle\n'), ((12359, 12426), 'ppcls.utils.save_load.load_dygraph_pretrain_from_url', 'load_dygraph_pretrain_from_url', (['model', 'model_url'], {'use_ssld': 'use_ssld'}), '(model, model_url, use_ssld=use_ssld)\n', (12389, 12426), False, 'from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url\n'), ((1615, 1630), 'paddle.shape', 'paddle.shape', (['x'], {}), '(x)\n', (1627, 1630), False, 'import paddle\n'), ((12473, 12513), 'ppcls.utils.save_load.load_dygraph_pretrain', 'load_dygraph_pretrain', (['model', 'pretrained'], {}), '(model, pretrained)\n', (12494, 12513), False, 'from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url\n')] |
"""Behavioural cloning: train a small CNN to predict steering from camera images."""
import csv

# Read the driving log; the first row is the CSV header.
lines = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)

import cv2

images = []
steerings = []
throttles = []
brakes = []
speeds = []
for line in lines[1:]:  # skip the header row
    image_path = 'data/' + line[0]
    image = cv2.imread(image_path)
    images.append(image)
    steerings.append(float(line[3]))
    # Throttle/brake/speed are parsed but not used for training below.
    throttles.append(float(line[4]))
    brakes.append(float(line[5]))
    speeds.append(float(line[6]))

import numpy as np

X_train = np.array(images)
y_train = np.array(steerings)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout

model = Sequential()
# BUG FIX: `input_shape` must be declared on the FIRST layer of a Sequential
# model. It was previously on the Lambda layer and described the post-crop
# shape (60, 320, 3), leaving the model without a defined input. Camera frames
# are 160x320x3; cropping 75 rows from the top and 25 from the bottom (sky and
# car hood) leaves 60x320x3.
model.add(Cropping2D(cropping=((75, 25), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: x / 255.0 - 0.5))  # normalize pixels to [-0.5, 0.5]
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1))  # single regression output: the steering angle
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=1)
model.save('model.h5')
| [
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.layers.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Dropout",
"keras.layers.Cropping2D",
"csv.reader",
"keras.layers.Dense",
"cv2.imread"
] | [((531, 547), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (539, 547), True, 'import numpy as np\n'), ((558, 577), 'numpy.array', 'np.array', (['steerings'], {}), '(steerings)\n', (566, 577), True, 'import numpy as np\n'), ((722, 734), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (732, 734), False, 'from keras.models import Sequential\n'), ((82, 101), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (92, 101), False, 'import csv\n'), ((310, 332), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (320, 332), False, 'import cv2\n'), ((745, 784), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((75, 25), (0, 0))'}), '(cropping=((75, 25), (0, 0)))\n', (755, 784), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((794, 853), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(60, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(60, 320, 3))\n', (800, 853), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((859, 900), 'keras.layers.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (872, 900), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((909, 923), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (921, 923), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((935, 976), 'keras.layers.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (948, 976), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((985, 999), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (997, 999), False, 'from 
keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((1011, 1024), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1018, 1024), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((1036, 1045), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1043, 1045), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n'), ((1057, 1065), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1062, 1065), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout\n')] |
import os
import time
import datetime
import json
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm import tqdm
from typing import List, Optional
class PrepareData:
    """Clean a CSV table and extract the time series it contains.

    + Ensures dates are continuous, filling unregistered dates with 0.
    + Splits the data into one time series per distinct value of the chosen
      identifier columns (e.g. id_producto, or id_producto + cadena).
    + Saves every generated time series, named after its identifier, in numpy
      format, together with a JSON file holding the list of timesteps and the
      feature names of each time series.
    """
    def __init__(self, path_data: str, colname_datetime: str, colname_features: List[str], colname_id_time_series: Optional[List[str]] = None):
        """Store the configuration; no data is loaded until `load_data`.

        + `path_data`: file the data is loaded from (CSV).
        + `colname_datetime`: column containing the dates ('YYYY-MM-dd').
        + `colname_features`: columns extracted as the series' features.
        + `colname_id_time_series`: one time series is created per distinct
          combination of values of these columns.
        """
        self.path_data = path_data
        self.colname_datetime = colname_datetime
        self.colname_features = colname_features
        self.colname_id_time_series = colname_id_time_series
        self.time_series = {}  # maps each series id to its numpy array of values
    def __call__(self):
        "Load the data and generate every time series."
        self.load_data()           # load the raw table
        self.get_id_time_series()  # build the id of each time series
        self.get_timesteps()       # full, continuous date range
        self.get_minmax()          # per-feature min / max
        self.get_mean_std()        # per-feature mean / std dev
        print("Generando series de tiempo")
        time.sleep(1)  # let the print flush before tqdm draws its progress bar
        for id_time_serie in tqdm(self.id_time_series):
            self.get_time_serie(id_time_serie)
    def load_data(self):
        "Load the input table from `self.path_data`."
        ALLOWED_FILES = [".csv"]  # extend here to support more formats, e.g. xlsx, pickle
        # Extension of the provided file
        extension = os.path.splitext(self.path_data)[-1]
        # Verify it is one of the file types we can load
        assert extension in set(ALLOWED_FILES), "Archivo debe ser uno de estos {}. El suyo '{}'".format(ALLOWED_FILES, extension)
        if self._file_exists(filename=self.path_data):
            self.data = pd.read_csv(self.path_data)
            print("Archivo cargado desde {}".format(self.path_data))
    def get_id_time_series(self):
        "Build the `ID_ts` column identifying each time series to generate."
        self.colname_id = "ID_ts"
        # The id is the concatenation of "<column>-<value>" pairs, e.g. "id_producto-7_cadena-12"
        self.data[self.colname_id] = self.data[self.colname_id_time_series].apply(
            lambda row: "_".join(str(c) + "-" + str(r)
                                 for c, r in zip(self.colname_id_time_series, row)),
            axis=1)
        # Distinct time series ids found in the data
        self.id_time_series = list(set(self.data[self.colname_id].tolist()))
        total_id = len(self.id_time_series)
        print("Se encontraron {} series de tiempo con id {}.".format(total_id, self.colname_id))
    def get_time_serie(self, id_time_serie):
        """Build the time series for one id over the full date range.

        Dates with no record are filled with a row of zeros. The result is
        stored in `self.time_series[id_time_serie]` as a numpy array of shape
        (n_timesteps, n_features).
        """
        # Rows belonging to the requested time series
        cols = [self.colname_datetime]
        cols.extend(self.colname_features)
        time_serie = self.data.query("`ID_ts` == '{}'".format(id_time_serie))[cols].copy()
        # date string -> list of feature values recorded on that date
        time_serie_by_date = {d.get(self.colname_datetime): [d.get(feature) for feature in self.colname_features]
                              for d in time_serie.to_dict("records")}
        # Walk the full date range, filling missing dates with zeros
        fill = [0 for _ in self.colname_features]
        rows = [time_serie_by_date.get(self.date_to_str(date), fill) for date in self.timesteps]
        self.time_series[id_time_serie] = np.array(rows)
    def get_timesteps(self):
        "Compute the continuous, daily date range covered by the data."
        dates = [self.str_to_date(date) for date in self.data[self.colname_datetime].tolist()]
        self.min_date = min(dates)
        self.max_date = max(dates)
        # +1 so both the first and the last day are included
        n_days = (self.max_date - self.min_date).days + 1
        self.timesteps = [self.add_days(self.min_date, days) for days in range(n_days)]
        print(f"Datos desde {self.date_to_str(self.min_date)} hasta {self.date_to_str(self.max_date)}, ({n_days} dias) ")
    def get_minmax(self):
        "Per-feature minimum and maximum over the whole table."
        self.list_min = self.data[self.colname_features].min(axis=0).tolist()
        self.list_max = self.data[self.colname_features].max(axis=0).tolist()
    def get_mean_std(self):
        "Per-feature mean and standard deviation over the whole table."
        self.list_mean = self.data[self.colname_features].mean(axis=0).tolist()
        self.list_std = self.data[self.colname_features].std(axis=0).tolist()
    def save(self):
        """Save every generated time series as a .npy file, plus a JSON config
        with the timesteps, features, statistics and paths to the arrays."""
        folder = Path("time_series")
        folder.mkdir(exist_ok=True)
        folder.joinpath("numpy").mkdir(exist_ok=True)
        print("Guardando series de tiempo")
        time.sleep(1)  # let the print flush before tqdm draws its progress bar
        for name_ts, ts_array in tqdm(self.time_series.items()):
            path_save = str(folder.joinpath("numpy/{}.npy".format(name_ts)))
            np.save(path_save, ts_array)
        time_series_config = dict(
            features=self.colname_features,
            timesteps=[self.date_to_str(ts) for ts in self.timesteps],
            id_time_series=list(self.time_series.keys()),
            basepath_time_series=str(folder.joinpath("numpy").absolute()),
            list_min=self.list_min,
            list_max=self.list_max,
            list_mean=self.list_mean,
            list_std=self.list_std
        )
        path_save_config = str(folder.joinpath("time_series_config.json"))
        with open(path_save_config, "w", encoding="utf8") as fp:
            json.dump(time_series_config, fp, ensure_ascii=False, indent=4)
        print("Series de tiempo guardadas en {}".format(str(folder.absolute())))
    @staticmethod
    def _file_exists(filename):
        "Return True if `filename` exists; otherwise warn and return False."
        if os.path.exists(filename):
            return True
        # Bug fix: the original fell off the end and implicitly returned None
        print("El archivo no existe. Revise si el directorio '{}' es correcto.".format(filename))
        return False
    @staticmethod
    def str_to_date(date):
        "Parse a 'YYYY-MM-dd' string into a datetime.date object."
        if isinstance(date, str):
            return datetime.date.fromisoformat(date)
        # TypeError is more precise than the generic Exception raised before
        raise TypeError("'date' debe ser un string de fecha con formato 'YYYY-MM-dd'")
    @staticmethod
    def date_to_str(date):
        "Format a datetime.date object as a 'YYYY-MM-dd' string."
        return date.isoformat()
    @staticmethod
    def add_days(date, days = 1):
        "Return `date` shifted by `days` (may be negative)."
        assert isinstance(date, datetime.date), "'date' debe ser un objeto datetime.date"
        return date + datetime.timedelta(days)
"os.path.exists",
"pandas.read_csv",
"pathlib.Path",
"json.dump",
"tqdm.tqdm",
"os.path.splitext",
"time.sleep",
"numpy.array",
"datetime.date.fromisoformat",
"datetime.timedelta",
"numpy.save"
] | [((1882, 1895), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1892, 1895), False, 'import time\n'), ((1925, 1950), 'tqdm.tqdm', 'tqdm', (['self.id_time_series'], {}), '(self.id_time_series)\n', (1929, 1950), False, 'from tqdm import tqdm\n'), ((4758, 4772), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (4766, 4772), True, 'import numpy as np\n'), ((6105, 6124), 'pathlib.Path', 'Path', (['"""time_series"""'], {}), "('time_series')\n", (6109, 6124), False, 'from pathlib import Path\n'), ((6276, 6289), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6286, 6289), False, 'import time\n'), ((7351, 7375), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (7365, 7375), False, 'import os\n'), ((2219, 2251), 'os.path.splitext', 'os.path.splitext', (['self.path_data'], {}), '(self.path_data)\n', (2235, 2251), False, 'import os\n'), ((2562, 2589), 'pandas.read_csv', 'pd.read_csv', (['self.path_data'], {}), '(self.path_data)\n', (2573, 2589), True, 'import pandas as pd\n'), ((6448, 6476), 'numpy.save', 'np.save', (['path_save', 'ts_array'], {}), '(path_save, ts_array)\n', (6455, 6476), True, 'import numpy as np\n'), ((7077, 7140), 'json.dump', 'json.dump', (['time_series_config', 'fp'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(time_series_config, fp, ensure_ascii=False, indent=4)\n', (7086, 7140), False, 'import json\n'), ((7701, 7734), 'datetime.date.fromisoformat', 'datetime.date.fromisoformat', (['date'], {}), '(date)\n', (7728, 7734), False, 'import datetime\n'), ((8314, 8338), 'datetime.timedelta', 'datetime.timedelta', (['days'], {}), '(days)\n', (8332, 8338), False, 'import datetime\n')] |
import numpy as np
def select_threshold(yval, pval):
    """Choose the best anomaly-detection threshold epsilon by F1 score.

    Scans 1001 candidate thresholds evenly spaced over the range of ``pval``
    and keeps the one maximizing the F1 score of the binary prediction
    ``pval < epsilon`` against the ground-truth labels ``yval``.

    Parameters
    ----------
    yval : array-like of {0, 1}
        Ground-truth labels for the validation set (1 = anomaly).
    pval : array-like of float
        Probability (density) value assigned to each validation example.

    Returns
    -------
    (best_eps, best_f1) : tuple
        The threshold with the highest F1 score, and that F1 score.
    """
    best_eps = 0
    best_f1 = 0
    for epsilon in np.linspace(np.min(pval), np.max(pval), num=1001):
        # Examples below the threshold are flagged as anomalies
        predictions = pval < epsilon
        tp = np.sum(np.logical_and(predictions, yval))
        fp = np.sum(np.logical_and(predictions, yval == 0))
        fn = np.sum(np.logical_and(np.logical_not(predictions), yval == 1))
        if tp == 0:
            # No true positives: F1 is zero (or undefined because a
            # denominator below is zero) and cannot beat the best so far.
            # The original code divided by zero here, producing NaN warnings.
            continue
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = (2 * precision * recall) / (precision + recall)
        if f1 > best_f1:
            best_f1 = f1
            best_eps = epsilon
    return best_eps, best_f1
| [
"numpy.max",
"numpy.logical_not",
"numpy.logical_and",
"numpy.min"
] | [((180, 192), 'numpy.min', 'np.min', (['pval'], {}), '(pval)\n', (186, 192), True, 'import numpy as np\n'), ((194, 206), 'numpy.max', 'np.max', (['pval'], {}), '(pval)\n', (200, 206), True, 'import numpy as np\n'), ((894, 927), 'numpy.logical_and', 'np.logical_and', (['predictions', 'yval'], {}), '(predictions, yval)\n', (908, 927), True, 'import numpy as np\n'), ((949, 987), 'numpy.logical_and', 'np.logical_and', (['predictions', '(yval == 0)'], {}), '(predictions, yval == 0)\n', (963, 987), True, 'import numpy as np\n'), ((1022, 1049), 'numpy.logical_not', 'np.logical_not', (['predictions'], {}), '(predictions)\n', (1036, 1049), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:gis]
# language: python
# name: conda-env-gis-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import cartopy.crs as ccrs
# from cartopy.io import shapereader
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm, PowerNorm
from matplotlib.collections import PatchCollection
import geopandas
from matplotlib.patches import Polygon
import shapely
import matplotlib as mpl
import pyproj as prj
from osgeo import ogr
# import xarray
# import netCDF4 as cf
# import pandas as pd
# from datetime import datetime
# from calendar import monthrange
# from collections import namedtuple
import numpy as np
# import os
from pyPRMS.ParamDb import ParamDb
# %%
# Local paths to the NHM parameter database and the national HRU shapefile;
# adjust these for your own checkout.
work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/bandit/nhmparamdb_CONUS'
shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_lcc/nhruNationalIdentifier.shp'
# Shapefile attribute holding the national HRU id
shape_key='hru_id_nat'
# %%
# Load (and verify) the NHM parameter database
pdb = ParamDb(paramdb_dir=work_dir, verbose=True, verify=True)
# %%
def plot_polygon_collection(ax, geoms, values=None, colormap='Set1', facecolor=None, edgecolor=None,
                            alpha=0.5, linewidth=1.0, **kwargs):
    """ Plot a collection of Polygon geometries on *ax*.

    Builds a single PatchCollection (much faster than plotting polygons one
    at a time); when *values* is given, each patch is colored by its value
    through *colormap*. Returns the PatchCollection so callers can update
    its data later via ``set_array``.
    """
    # from https://stackoverflow.com/questions/33714050/geopandas-plotting-any-way-to-speed-things-up
    patches = []
    for poly in geoms:
        if poly.has_z:
            # Rebuild the polygon from x/y only; matplotlib patches are 2-D.
            # (Bug fix: the vertex array used to be extracted *before* this
            # flattening step, so the 2-D conversion was discarded.)
            poly = shapely.geometry.Polygon(zip(*poly.exterior.xy))
        a = np.asarray(poly.exterior)
        patches.append(Polygon(a))
    patches = PatchCollection(patches, facecolor=facecolor, linewidth=linewidth, edgecolor=edgecolor, alpha=alpha, **kwargs)
    if values is not None:
        patches.set_array(values)
        patches.set_cmap(colormap)
    ax.add_collection(patches, autolim=True)
    ax.autoscale_view()
    return patches
# %%
# ### Get extent information from the national HRUs shapefile
# Need two shapefiles 1) in projected coordinates, 2) in geographic coordinates
# If gdal is installed can create geographic coordinates from projected with:
# ogr2ogr -t_srs epsg:4326 output_wgs84.shp input.shp
# shpfile = '/Users/pnorton/Projects/National_Hydrology_Model/extraction_requests/20180307_red_river/GIS/HRU_subset_nad83.shp'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/extraction_requests/20180307_red_river/GIS/HRU_subset_usaea.shp'
shpfile = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple/nhruNationalIdentifier.shp'
shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_lcc/nhruNationalIdentifier.shp'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/GIS/NHM_GF_reduced.gdb'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_usaea/nhruNationalIdentifier.shp'
# Name of attribute to use. Change to match the name of the HRU id attribute in the shapefile
shape_key='hru_id_nat'
# Use gdal/ogr to get the extent information
# Shapefile can be in projected coordinates
# Driver can be: OpenFileGDB or ESRI Shapefile
inDriver = ogr.GetDriverByName("ESRI Shapefile")
inDataSource = inDriver.Open(shpfile_extent, 0)
inLayer = inDataSource.GetLayer()
# inLayer = inDataSource.GetLayerByName('nhruNationalIdentifier')
extent = inLayer.GetExtent()
# Get the spatial reference information from the shapefile
spatial_ref = inLayer.GetSpatialRef()
# Create transformation object using projection information from the shapefile
xform = prj.Proj(spatial_ref.ExportToProj4())
west, east, south, north = extent
pad = 100000. # amount to pad the extent values with (in meters)
#east += pad
#west -= pad
#south -= pad
#north += pad
# Convert the projected corners to lon/lat for display
LL_lon, LL_lat = xform(west, south, inverse=True)
UR_lon, UR_lat = xform(east, north, inverse=True)
print('\tExtent: ({0:f}, {1:f}, {2:f}, {3:f})'.format(west, east, south, north))
print('\tExtent: (LL: [{}, {}], UR: [{}, {}])'.format(LL_lon, LL_lat, UR_lon, UR_lat))
extent_dms = [LL_lon, UR_lon, LL_lat, UR_lat]
# Matplotlib basemap requires the map center (lon_0, lat_0) be in decimal degrees
# and yet the corners of the extent can be in projected coordinates
cen_lon, cen_lat = xform((east+west)/2, (south+north)/2, inverse=True)
print('cen_lon: {}'.format(cen_lon))
print('cen_lat: {}'.format(cen_lat))
# %%
print(spatial_ref)
# %%
# Read the shapefile
hru_df = geopandas.read_file(shpfile_extent)
# %%
hru_df.crs.coordinate_operation.method_code
# %%
hru_df.crs.coordinate_operation.params
# %%
# Collect the projection parameters into a dict keyed by parameter name
aa = {}
for yy in hru_df.crs.coordinate_operation.params:
    aa[yy.name] = yy.value
    print(yy.name, yy.value)
# %%
# Bounding box of all HRU geometries (projected coordinates)
minx, miny, maxx, maxy = hru_df.geometry.total_bounds
# %%
# Parameter to plot and the month (0-based) to select
the_var = 'tstorm_mo'
time_index = 0
# param_var = pdb.parameters.get_dataframe(the_var).iloc[:]
# Use the following for nhru x nmonths parameters
param_var = pdb.parameters.get_dataframe(the_var).iloc[:,time_index].to_frame(name=the_var)
param_var.head()
# param_var = pdb.parameters.get_dataframe(the_var)
# param_var.head()
# %%
# Create the colormap
# cmap = 'BrBG' #'GnBu_r' # for snow
# cmap = 'GnBu_r'
# cmap = 'jet'
# Choose a colormap suited to the variable being plotted
if the_var in ['tmax_allsnow', 'tmax_allrain_offset']:
    cmap = 'RdBu_r'
elif the_var in ['net_ppt', 'net_rain', 'net_snow']:
    cmap = 'YlGnBu'
elif the_var in ['tmax_cbh_adj', 'tmin_cbh_adj']:
    cmap = 'coolwarm'
else:
    cmap = 'jet'
# create the colormap if a list of names is given, otherwise
# use the given colormap
lscm = mpl.colors.LinearSegmentedColormap
if isinstance(cmap,(list,tuple)):
    cmap = lscm.from_list('mycm', cmap)
else:
    cmap = plt.get_cmap(cmap)
missing_color = '#ff00cb' # pink/magenta
# Get the min and max values for the variable
max_val = param_var.max().max()
min_val = param_var.min().min()
# Override for tmax_allsnow
# max_val = 35.8
# min_val = 28.2
# norm = PowerNorm(gamma=0.05)
# norm = LogNorm(vmin=min_val, vmax=max_val)
# Pick a normalization; log scale for precipitation-like variables whose
# minimum is exactly zero (values below vmin render as 'under' color)
if min_val == 0.:
    if the_var in ['net_ppt', 'net_rain', 'net_snow']:
        cmap.set_under(color='None')
        norm = LogNorm(vmin=0.000001, vmax=max_val)
    else:
        norm = Normalize(vmin=0.000001, vmax=max_val)
else:
    if the_var in ['tmax_allsnow', 'tmax_allrain_offset']:
        norm = Normalize(vmin=min_val, vmax=max_val)
    elif the_var in ['tmax_cbh_adj', 'tmin_cbh_adj']:
        # Symmetric limits so the diverging map is centered on zero
        norm = Normalize(vmin=-max_val, vmax=max_val)
    else:
        norm = Normalize(vmin=min_val, vmax=max_val)
# %%
print(max_val)
print(min_val)
# %%
# This takes care of multipolygons that are in the NHM geodatabase/shapefile
geoms_exploded = hru_df.explode().reset_index(level=1, drop=True)
# xdf_df = xdf[the_var][2].to_dataframe()
# Attach the parameter values to each (exploded) HRU polygon
df_mrg = geoms_exploded.merge(param_var, left_on=shape_key, right_index=True, how='left')
# Build a cartopy CRS matching the shapefile's projection method code
if '9822' in hru_df.crs.coordinate_operation.method_code:
    # Albers Equal Area
    aa = {}
    for yy in hru_df.crs.coordinate_operation.params:
        aa[yy.name] = yy.value
    crs_proj = ccrs.AlbersEqualArea(central_longitude=aa['Longitude of false origin'],
                                central_latitude=aa['Latitude of false origin'],
                                standard_parallels=(aa['Latitude of 1st standard parallel'],
                                                    aa['Latitude of 2nd standard parallel']),
                                false_easting=aa['Easting at false origin'],
                                false_northing=aa['Northing at false origin'])
elif '9802' in hru_df.crs.coordinate_operation.method_code:
    # Lambert Conformal Conic
    crs_proj = ccrs.LambertConformal(central_latitude=aa['Latitude of false origin'],
                                    central_longitude=aa['Longitude of false origin'],
                                    standard_parallels=(aa['Latitude of 1st standard parallel'],
                                                        aa['Latitude of 2nd standard parallel']),
                                    false_easting=aa['Easting at false origin'],
                                    false_northing=aa['Northing at false origin'])
else:
    # We're gonna crash
    # NOTE(review): any other method code leaves crs_proj undefined, so the
    # plt.axes call below raises NameError — consider raising explicitly here.
    pass
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(30,20))
ax = plt.axes(projection=crs_proj)
ax.coastlines()
ax.gridlines()
# ax.set_extent(extent_dms)
ax.set_extent([minx, maxx, miny, maxy], crs=crs_proj)
# Scalar mappable drives the colorbar for the patch collection below
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(df_mrg[the_var])
plt.colorbar(mapper, shrink=0.6)
# plt.title('Variable: {}, Date: {}'.format(the_var, xdf_df['time'].iloc[0].isoformat()))
plt.title(f'Variable: {the_var}, Month: {time_index+1}')
# plt.title('Variable: {}'.format(the_var))
col = plot_polygon_collection(ax, df_mrg.geometry, values=df_mrg[the_var], colormap=cmap, norm=norm, linewidth=0.0)
# plt.savefig(f'/Users/pnorton/tmp/v1_figs/{the_var}_v11_{time_index+1:02}.png', dpi=300, bbox_inches='tight')
# %%
# Re-render the existing figure for a different month by updating the
# patch-collection data only (no need to rebuild the polygons).
# xdf_df = xdf[the_var][4].to_dataframe()
time_index = 5
param_var = pdb.parameters.get_dataframe(the_var).iloc[:,time_index].to_frame(name=the_var)
df_mrg = geoms_exploded.merge(param_var, left_on='hru_id_nat', right_index=True, how='left')
# ax.set_title('Variable: {}, Date: {}'.format(the_var, xdf_df['time'].iloc[0].isoformat()))
ax.set_title(f'Variable: {the_var}, Month: {time_index+1}')
col.set_array(df_mrg[the_var])
fig
# plt.savefig(f'/Users/pnorton/tmp/v1_figs/{the_var}_v11_{time_index+1:02}.png', dpi=300, bbox_inches='tight')
# %%
# %%
# %%
| [
"geopandas.read_file",
"cartopy.crs.LambertConformal",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"matplotlib.collections.PatchCollection",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.axes",
"pyPRMS.ParamDb.ParamDb",
"cartopy.crs.AlbersEqualArea",
"osgeo.ogr.GetDriverByName",
"matplot... | [((1240, 1296), 'pyPRMS.ParamDb.ParamDb', 'ParamDb', ([], {'paramdb_dir': 'work_dir', 'verbose': '(True)', 'verify': '(True)'}), '(paramdb_dir=work_dir, verbose=True, verify=True)\n', (1247, 1296), False, 'from pyPRMS.ParamDb import ParamDb\n'), ((3430, 3467), 'osgeo.ogr.GetDriverByName', 'ogr.GetDriverByName', (['"""ESRI Shapefile"""'], {}), "('ESRI Shapefile')\n", (3449, 3467), False, 'from osgeo import ogr\n'), ((4702, 4737), 'geopandas.read_file', 'geopandas.read_file', (['shpfile_extent'], {}), '(shpfile_extent)\n', (4721, 4737), False, 'import geopandas\n'), ((8486, 8534), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(30, 20)'}), '(nrows=1, ncols=1, figsize=(30, 20))\n', (8498, 8534), True, 'import matplotlib.pyplot as plt\n'), ((8540, 8569), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'crs_proj'}), '(projection=crs_proj)\n', (8548, 8569), True, 'import matplotlib.pyplot as plt\n'), ((8693, 8736), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (8714, 8736), True, 'import matplotlib as mpl\n'), ((8771, 8803), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mapper'], {'shrink': '(0.6)'}), '(mapper, shrink=0.6)\n', (8783, 8803), True, 'import matplotlib.pyplot as plt\n'), ((8896, 8955), 'matplotlib.pyplot.title', 'plt.title', (['f"""Variable: {the_var}, Month: {time_index + 1}"""'], {}), "(f'Variable: {the_var}, Month: {time_index + 1}')\n", (8905, 8955), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1961), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'facecolor': 'facecolor', 'linewidth': 'linewidth', 'edgecolor': 'edgecolor', 'alpha': 'alpha'}), '(patches, facecolor=facecolor, linewidth=linewidth,\n edgecolor=edgecolor, alpha=alpha, **kwargs)\n', (1862, 1961), False, 'from matplotlib.collections import PatchCollection\n'), ((5917, 5935), 'matplotlib.pyplot.get_cmap', 
'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (5929, 5935), True, 'import matplotlib.pyplot as plt\n'), ((7283, 7617), 'cartopy.crs.AlbersEqualArea', 'ccrs.AlbersEqualArea', ([], {'central_longitude': "aa['Longitude of false origin']", 'central_latitude': "aa['Latitude of false origin']", 'standard_parallels': "(aa['Latitude of 1st standard parallel'], aa[\n 'Latitude of 2nd standard parallel'])", 'false_easting': "aa['Easting at false origin']", 'false_northing': "aa['Northing at false origin']"}), "(central_longitude=aa['Longitude of false origin'],\n central_latitude=aa['Latitude of false origin'], standard_parallels=(aa\n ['Latitude of 1st standard parallel'], aa[\n 'Latitude of 2nd standard parallel']), false_easting=aa[\n 'Easting at false origin'], false_northing=aa['Northing at false origin'])\n", (7303, 7617), True, 'import cartopy.crs as ccrs\n'), ((1679, 1704), 'numpy.asarray', 'np.asarray', (['poly.exterior'], {}), '(poly.exterior)\n', (1689, 1704), True, 'import numpy as np\n'), ((6362, 6395), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': '(1e-06)', 'vmax': 'max_val'}), '(vmin=1e-06, vmax=max_val)\n', (6369, 6395), False, 'from matplotlib.colors import Normalize, LogNorm, PowerNorm\n'), ((6424, 6459), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(1e-06)', 'vmax': 'max_val'}), '(vmin=1e-06, vmax=max_val)\n', (6433, 6459), False, 'from matplotlib.colors import Normalize, LogNorm, PowerNorm\n'), ((6543, 6580), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': 'min_val', 'vmax': 'max_val'}), '(vmin=min_val, vmax=max_val)\n', (6552, 6580), False, 'from matplotlib.colors import Normalize, LogNorm, PowerNorm\n'), ((7907, 8242), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {'central_latitude': "aa['Latitude of false origin']", 'central_longitude': "aa['Longitude of false origin']", 'standard_parallels': "(aa['Latitude of 1st standard parallel'], aa[\n 'Latitude of 2nd standard parallel'])", 'false_easting': 
"aa['Easting at false origin']", 'false_northing': "aa['Northing at false origin']"}), "(central_latitude=aa['Latitude of false origin'],\n central_longitude=aa['Longitude of false origin'], standard_parallels=(\n aa['Latitude of 1st standard parallel'], aa[\n 'Latitude of 2nd standard parallel']), false_easting=aa[\n 'Easting at false origin'], false_northing=aa['Northing at false origin'])\n", (7928, 8242), True, 'import cartopy.crs as ccrs\n'), ((1820, 1830), 'matplotlib.patches.Polygon', 'Polygon', (['a'], {}), '(a)\n', (1827, 1830), False, 'from matplotlib.patches import Polygon\n'), ((6650, 6688), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(-max_val)', 'vmax': 'max_val'}), '(vmin=-max_val, vmax=max_val)\n', (6659, 6688), False, 'from matplotlib.colors import Normalize, LogNorm, PowerNorm\n'), ((6722, 6759), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': 'min_val', 'vmax': 'max_val'}), '(vmin=min_val, vmax=max_val)\n', (6731, 6759), False, 'from matplotlib.colors import Normalize, LogNorm, PowerNorm\n')] |
from __future__ import division
import unittest
from numpy.testing import assert_allclose
class TestInterpolation(unittest.TestCase):
    """Unit tests for dolo's Smolyak sparse-grid interpolation."""
    # def test_chebychev(self):
    #
    #     import numpy as np
    #     from dolo.numeric.interpolation.smolyak import chebychev, chebychev2
    #
    #     points = np.linspace(-1,1,100)
    #
    #     cheb = chebychev(points,5)
    #     cheb2 = chebychev2(points,5)
    #
    #     def T4(x):
    #         return ( 8*np.power(x,4) - 8*np.power(x,2) + 1 )
    #     def U4(x):
    #         return 4*( 16*np.power(x,4) - 12*np.power(x,2) + 1 )
    #
    #     true_values_T = np.array([T4(i) for i in points])
    #     true_values_U = np.array([U4(i) for i in points])
    #
    #     assert_allclose(true_values_T, cheb[4,:])
    #     assert_allclose(true_values_U, cheb2[4,:]*4)
    def test_smolyak(self):
        "Interpolating a smooth 2-d function at its own grid nodes reproduces the fitted values."
        import numpy
        f = lambda x: numpy.column_stack([
            x[:,0] * x[:,1]**0.5,
            x[:,1] * x[:,1] - x[:,0] * x[:,0]
        ])
        a = [0.5,0.1]
        b = [2,3]
        bounds = numpy.row_stack([a,b])  # NOTE(review): unused; a and b are passed directly below
        from dolo.numeric.interpolation.smolyak import SmolyakGrid
        sg = SmolyakGrid(a,b,3)
        values = f(sg.grid)
        sg.set_values(values)
        # Evaluation at the grid nodes must match the fitted values
        assert( abs( sg(sg.grid) - values ).max()<1e-8 )
    #
    # def test_smolyak_plot_2d(selfs):
    #
    #     import numpy
    #     from dolo.numeric.interpolation.smolyak import SmolyakGrid
    #
    #     bounds = numpy.column_stack([[-1,1]]*2)
    #     sg = SmolyakGrid(bounds[0,:],bounds[1,:],3)
    #     sg.plot_grid()
    #
    # def test_smolyak_plot_3d(selfs):
    #
    #     import numpy
    #     from dolo.numeric.interpolation.smolyak import SmolyakGrid
    #
    #     bounds = numpy.column_stack([[-1,1]]*3)
    #     sg = SmolyakGrid(bounds[0,:],bounds[1,:],3)
    #     sg.plot_grid()
    def test_smolyak_2(self):
        "Time repeated fit/evaluate cycles on a 5-dimensional Smolyak grid."
        import numpy
        from dolo.numeric.interpolation.smolyak import SmolyakGrid
        d = 5
        l = 4
        bounds = numpy.row_stack([[-0.5]*d, [0.7]*d])
        sg = SmolyakGrid(bounds[0,:],bounds[1,:],l)
        f = lambda x: numpy.row_stack([
            x[:,0] * x[:,1],
            x[:,1] * x[:,1] - x[:,0] * x[:,0]
        ])
        values = f(sg.grid)  # NOTE(review): computed but set_values receives sg.grid below — verify intent
        import time
        t = time.time()
        for i in range(5):
            sg.set_values(sg.grid)
            val = sg(sg.grid)
        s = time.time()
        print(s-t)
if __name__ == '__main__':
    unittest.main()
| [
"dolo.numeric.interpolation.smolyak.SmolyakGrid",
"numpy.column_stack",
"numpy.row_stack",
"unittest.main",
"time.time"
] | [((2522, 2537), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2535, 2537), False, 'import unittest\n'), ((1076, 1099), 'numpy.row_stack', 'numpy.row_stack', (['[a, b]'], {}), '([a, b])\n', (1091, 1099), False, 'import numpy\n'), ((1181, 1201), 'dolo.numeric.interpolation.smolyak.SmolyakGrid', 'SmolyakGrid', (['a', 'b', '(3)'], {}), '(a, b, 3)\n', (1192, 1201), False, 'from dolo.numeric.interpolation.smolyak import SmolyakGrid\n'), ((2040, 2080), 'numpy.row_stack', 'numpy.row_stack', (['[[-0.5] * d, [0.7] * d]'], {}), '([[-0.5] * d, [0.7] * d])\n', (2055, 2080), False, 'import numpy\n'), ((2090, 2132), 'dolo.numeric.interpolation.smolyak.SmolyakGrid', 'SmolyakGrid', (['bounds[0, :]', 'bounds[1, :]', 'l'], {}), '(bounds[0, :], bounds[1, :], l)\n', (2101, 2132), False, 'from dolo.numeric.interpolation.smolyak import SmolyakGrid\n'), ((2340, 2351), 'time.time', 'time.time', ([], {}), '()\n', (2349, 2351), False, 'import time\n'), ((2457, 2468), 'time.time', 'time.time', ([], {}), '()\n', (2466, 2468), False, 'import time\n'), ((905, 994), 'numpy.column_stack', 'numpy.column_stack', (['[x[:, 0] * x[:, 1] ** 0.5, x[:, 1] * x[:, 1] - x[:, 0] * x[:, 0]]'], {}), '([x[:, 0] * x[:, 1] ** 0.5, x[:, 1] * x[:, 1] - x[:, 0] *\n x[:, 0]])\n', (923, 994), False, 'import numpy\n'), ((2151, 2226), 'numpy.row_stack', 'numpy.row_stack', (['[x[:, 0] * x[:, 1], x[:, 1] * x[:, 1] - x[:, 0] * x[:, 0]]'], {}), '([x[:, 0] * x[:, 1], x[:, 1] * x[:, 1] - x[:, 0] * x[:, 0]])\n', (2166, 2226), False, 'import numpy\n')] |
"""Plot mean absolute error (MAE) figures.
Two types of plots are done:
- MAE versus the chronological age,
- MAE of one modality versus MAE of another modality.
"""
from itertools import combinations
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Output directory, input predictions file, and figure file format
FIG_OUT_PATH = '../../data/figures/'
PREDICTIONS = '../../data/age_prediction_exp_data.h5'
OUT_FTYPE = 'png'
data = pd.read_hdf(PREDICTIONS, key='predictions')
# Plot errors of predictions from different modalities versus subject's age
keys = data.columns
# remove column with the original age
keys = keys[1:]
ylim = (-2, 55)
xlim = None
out_folder = os.path.join(FIG_OUT_PATH, 'ae_vs_age')
# Recreate the output folder from scratch so stale figures are removed
if os.path.exists(out_folder):
    shutil.rmtree(out_folder)
    os.mkdir(out_folder)
else:
    os.mkdir(out_folder)
# One scatter plot per modality: absolute error vs chronological age
for key1 in keys:
    data_slice = data[key1].dropna()
    age = data.loc[data_slice.index, 'age'].values
    abs_errors = np.abs(data_slice.values - age)
    plt.close()
    plt.figure()
    plt.scatter(age, abs_errors, edgecolors='black')
    plt.title(key1)
    plt.xlabel('Age (Years)')
    plt.ylabel('Absolute Error (Years)')
    plt.grid()
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    name = f'AE_vs_age_{key1.replace(" ", "-")}.{OUT_FTYPE}'
    plt.savefig(os.path.join(out_folder, name), bbox_inches='tight')
# Plot errors of predictions from different modalities versus each other
data = data.dropna()
fold_idx = data['fold_idx']
data = data.drop(['fold_idx'], axis=1)
age = data.age.values
keys = data.columns
# remove column with the original age
keys = keys[1:]
xlim = (0, 55)
ylim = (0, 55)
out_folder = os.path.join(FIG_OUT_PATH, 'ae_predictor_vs_predictor')
# Recreate the output folder from scratch so stale figures are removed
if os.path.exists(out_folder):
    shutil.rmtree(out_folder)
    os.mkdir(out_folder)
else:
    os.mkdir(out_folder)
title = 'Absolute Error Correlation'
# One scatter plot per pair of modalities, points colored by subject age
for key1, key2 in combinations(keys, r=2):
    plt.close()
    fig, ax = plt.subplots()
    x_values = np.abs(data[key1].values - age)
    y_values = np.abs(data[key2].values - age)
    plt.scatter(x_values, y_values, edgecolors='black',
                c=age, cmap=plt.cm.viridis_r)
    plt.title(title)
    plt.xlabel(key1 + ', AE (Years)')
    plt.ylabel(key2 + ', AE (Years)')
    # Pad the axis limits by one year on each side
    if xlim is not None:
        xlim_ = (xlim[0] - 1, xlim[1] + 1)
    else:
        xlim_ = (data[key1].min() - 1, data[key1].max() + 1)
    if ylim is not None:
        ylim_ = (ylim[0] - 1, ylim[1] + 1)
    else:
        ylim_ = (data[key2].min() - 1, data[key2].max() + 1)
    ax.set(xlim=xlim_, ylim=ylim_)
    # Identity line: points on it have equal error in both modalities
    ax.plot(ax.get_xlim(), ax.get_ylim(), ls='--', c='.3')
    plt.grid()
    plt.colorbar()
    name = f'AE_{key1.replace(" ", "-")}_vs_{key2.replace(" ", "-")}'\
        f'.{OUT_FTYPE}'
    plt.savefig(os.path.join(out_folder, name), bbox_inches='tight')
| [
"os.path.exists",
"numpy.abs",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"os.path.join",
"itertools.combinations",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"os.mkdir",
"matplotlib.pyplot.scatter",
"shutil.rmt... | [((425, 468), 'pandas.read_hdf', 'pd.read_hdf', (['PREDICTIONS'], {'key': '"""predictions"""'}), "(PREDICTIONS, key='predictions')\n", (436, 468), True, 'import pandas as pd\n'), ((661, 700), 'os.path.join', 'os.path.join', (['FIG_OUT_PATH', '"""ae_vs_age"""'], {}), "(FIG_OUT_PATH, 'ae_vs_age')\n", (673, 700), False, 'import os\n'), ((705, 731), 'os.path.exists', 'os.path.exists', (['out_folder'], {}), '(out_folder)\n', (719, 731), False, 'import os\n'), ((1697, 1752), 'os.path.join', 'os.path.join', (['FIG_OUT_PATH', '"""ae_predictor_vs_predictor"""'], {}), "(FIG_OUT_PATH, 'ae_predictor_vs_predictor')\n", (1709, 1752), False, 'import os\n'), ((1757, 1783), 'os.path.exists', 'os.path.exists', (['out_folder'], {}), '(out_folder)\n', (1771, 1783), False, 'import os\n'), ((1927, 1950), 'itertools.combinations', 'combinations', (['keys'], {'r': '(2)'}), '(keys, r=2)\n', (1939, 1950), False, 'from itertools import combinations\n'), ((737, 762), 'shutil.rmtree', 'shutil.rmtree', (['out_folder'], {}), '(out_folder)\n', (750, 762), False, 'import shutil\n'), ((767, 787), 'os.mkdir', 'os.mkdir', (['out_folder'], {}), '(out_folder)\n', (775, 787), False, 'import os\n'), ((798, 818), 'os.mkdir', 'os.mkdir', (['out_folder'], {}), '(out_folder)\n', (806, 818), False, 'import os\n'), ((943, 974), 'numpy.abs', 'np.abs', (['(data_slice.values - age)'], {}), '(data_slice.values - age)\n', (949, 974), True, 'import numpy as np\n'), ((979, 990), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (988, 990), True, 'import matplotlib.pyplot as plt\n'), ((995, 1007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1005, 1007), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1060), 'matplotlib.pyplot.scatter', 'plt.scatter', (['age', 'abs_errors'], {'edgecolors': '"""black"""'}), "(age, abs_errors, edgecolors='black')\n", (1023, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1080), 'matplotlib.pyplot.title', 'plt.title', (['key1'], 
{}), '(key1)\n', (1074, 1080), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age (Years)"""'], {}), "('Age (Years)')\n", (1095, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1151), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute Error (Years)"""'], {}), "('Absolute Error (Years)')\n", (1125, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1166), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1164, 1166), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1814), 'shutil.rmtree', 'shutil.rmtree', (['out_folder'], {}), '(out_folder)\n', (1802, 1814), False, 'import shutil\n'), ((1819, 1839), 'os.mkdir', 'os.mkdir', (['out_folder'], {}), '(out_folder)\n', (1827, 1839), False, 'import os\n'), ((1850, 1870), 'os.mkdir', 'os.mkdir', (['out_folder'], {}), '(out_folder)\n', (1858, 1870), False, 'import os\n'), ((1956, 1967), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1965, 1967), True, 'import matplotlib.pyplot as plt\n'), ((1982, 1996), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1994, 1996), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2043), 'numpy.abs', 'np.abs', (['(data[key1].values - age)'], {}), '(data[key1].values - age)\n', (2018, 2043), True, 'import numpy as np\n'), ((2059, 2090), 'numpy.abs', 'np.abs', (['(data[key2].values - age)'], {}), '(data[key2].values - age)\n', (2065, 2090), True, 'import numpy as np\n'), ((2095, 2181), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_values', 'y_values'], {'edgecolors': '"""black"""', 'c': 'age', 'cmap': 'plt.cm.viridis_r'}), "(x_values, y_values, edgecolors='black', c=age, cmap=plt.cm.\n viridis_r)\n", (2106, 2181), True, 'import matplotlib.pyplot as plt\n'), ((2197, 2213), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2206, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2251), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["(key1 + ', AE 
(Years)')"], {}), "(key1 + ', AE (Years)')\n", (2228, 2251), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2289), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(key2 + ', AE (Years)')"], {}), "(key2 + ', AE (Years)')\n", (2266, 2289), True, 'import matplotlib.pyplot as plt\n'), ((2669, 2679), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2677, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2698), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2696, 2698), True, 'import matplotlib.pyplot as plt\n'), ((1201, 1215), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (1209, 1215), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1263), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (1257, 1263), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1372), 'os.path.join', 'os.path.join', (['out_folder', 'name'], {}), '(out_folder, name)\n', (1354, 1372), False, 'import os\n'), ((2814, 2844), 'os.path.join', 'os.path.join', (['out_folder', 'name'], {}), '(out_folder, name)\n', (2826, 2844), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from scipy import ndimage as ndi
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from sklearn.cluster import MeanShift
from PIL import Image
# Maximum thumbnail size (width, height) handed to PIL's Image.thumbnail.
size = 100, 100
# Input images used by the segmentation demo below.
img_names = ["../Images/Segmentation/strawberry.png", "../Images/Segmentation/shapes.png"]
# Extension-task images (not processed by the loop in this file section).
ext_names = ["../Images/Segmentation/coins.png", "../Images/Segmentation/two_halves.png"]
output_path_extension = '../OutputImages/Segmentation/'
# list(...) copies the sequence directly; the original `[i for i in xs]`
# comprehension was a slower, less idiomatic way to spell the same copy.
images = list(img_names)
ext_images = list(ext_names)
def plot_three_images(figure_title, image1, label1,
                      image2, label2, image3, label3, output_path):
    """Show three images side by side under one title and save the figure.

    Each image is drawn in its own subplot with axes hidden and its label
    as the subplot title; the whole figure is displayed and then written
    to ``output_path``.
    """
    fig = plt.figure()
    fig.suptitle(figure_title)
    # Drive the three identical panels from one loop instead of
    # repeating the plotting calls per image.
    panels = ((image1, label1), (image2, label2), (image3, label3))
    for position, (image, label) in enumerate(panels, start=1):
        fig.add_subplot(1, 3, position)
        plt.imshow(image)
        plt.axis('off')
        plt.title(label)
    plt.show()
    fig.savefig(output_path)
# For every input image: segment it twice (MeanShift clustering on colour,
# watershed on the distance transform) and save a side-by-side comparison.
for img_path in images:
    img = Image.open(img_path)
    img.thumbnail(size) # Convert the image to 100 x 100
    # Convert the image to a numpy matrix (drop any alpha channel).
    img_mat = np.array(img)[:, :, :3]
    # --------------- Mean Shift algortithm ---------------------
    # Extract the three RGB colour channels
    # NOTE(review): PIL yields RGB order, so the names b, g, r here are
    # misleading (cv2.split just splits channels in stored order) — the
    # clustering result is unaffected, but the naming should be verified.
    b, g, r = cv2.split(img_mat)
    # Combine the three colour channels by flatten each channel
    # then stacking the flattened channels together.
    # This gives the "colour_samples"
    colour_samples = np.stack((b.flatten(), g.flatten(), r.flatten()), axis=1)
    # Perform Meanshift clustering
    ms_clf = MeanShift(bin_seeding=True)
    ms_labels = ms_clf.fit_predict(colour_samples)
    # Reshape ms_labels back to the original image shape for displaying the segmentation output
    ms_labels = np.reshape(ms_labels, b.shape)
    # ------------- Water Shed algortithm --------------------------
    # Convert the image to gray scale and convert the image to a numpy matrix
    img_array = cv2.cvtColor(img_mat, cv2.COLOR_BGR2GRAY)
    # Calculate the distance transform
    distance = ndi.distance_transform_edt(img_array)
    # Generate the watershed markers: one marker per local maximum of the
    # distance map, labelled with connected-component ids.
    local_maximum = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)))
    markers = ndi.label(local_maximum)[0]
    # Perform watershed and store the labels
    ws_labels = watershed(-distance, markers, mask=img_array)
    # Display the results
    plot_three_images(img_path, img, "Original Image", ms_labels, "MeanShift Labels",
                      ws_labels, "Watershed Labels", output_path_extension + os.path.split(img_path)[1])
    # If you want to visualise the watershed distance markers then try
    # plotting the code below.
    # plot_three_images(img_path, img, "Original Image", -distance, "Watershed Distance",
    #                   ws_labels, "Watershed Labels", output_path_extension + os.path.split(img_path)[1])
| [
"matplotlib.pyplot.imshow",
"scipy.ndimage.distance_transform_edt",
"skimage.morphology.watershed",
"PIL.Image.open",
"matplotlib.pyplot.title",
"numpy.reshape",
"sklearn.cluster.MeanShift",
"numpy.ones",
"scipy.ndimage.label",
"os.path.split",
"numpy.array",
"matplotlib.pyplot.figure",
"cv2... | [((706, 718), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (716, 718), True, 'import matplotlib.pyplot as plt\n'), ((814, 832), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image1'], {}), '(image1)\n', (824, 832), True, 'import matplotlib.pyplot as plt\n'), ((837, 852), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (845, 852), True, 'import matplotlib.pyplot as plt\n'), ((857, 874), 'matplotlib.pyplot.title', 'plt.title', (['label1'], {}), '(label1)\n', (866, 874), True, 'import matplotlib.pyplot as plt\n'), ((940, 958), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image2'], {}), '(image2)\n', (950, 958), True, 'import matplotlib.pyplot as plt\n'), ((963, 978), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (971, 978), True, 'import matplotlib.pyplot as plt\n'), ((983, 1000), 'matplotlib.pyplot.title', 'plt.title', (['label2'], {}), '(label2)\n', (992, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1083), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image3'], {}), '(image3)\n', (1075, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1103), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1096, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1108, 1125), 'matplotlib.pyplot.title', 'plt.title', (['label3'], {}), '(label3)\n', (1117, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1139, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1227), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1217, 1227), False, 'from PIL import Image\n'), ((1492, 1510), 'cv2.split', 'cv2.split', (['img_mat'], {}), '(img_mat)\n', (1501, 1510), False, 'import cv2\n'), ((1796, 1823), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bin_seeding': '(True)'}), '(bin_seeding=True)\n', (1805, 1823), False, 'from sklearn.cluster import MeanShift\n'), ((1988, 2018), 
'numpy.reshape', 'np.reshape', (['ms_labels', 'b.shape'], {}), '(ms_labels, b.shape)\n', (1998, 2018), True, 'import numpy as np\n'), ((2184, 2225), 'cv2.cvtColor', 'cv2.cvtColor', (['img_mat', 'cv2.COLOR_BGR2GRAY'], {}), '(img_mat, cv2.COLOR_BGR2GRAY)\n', (2196, 2225), False, 'import cv2\n'), ((2281, 2318), 'scipy.ndimage.distance_transform_edt', 'ndi.distance_transform_edt', (['img_array'], {}), '(img_array)\n', (2307, 2318), True, 'from scipy import ndimage as ndi\n'), ((2548, 2593), 'skimage.morphology.watershed', 'watershed', (['(-distance)', 'markers'], {'mask': 'img_array'}), '(-distance, markers, mask=img_array)\n', (2557, 2593), False, 'from skimage.morphology import watershed\n'), ((1342, 1355), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1350, 1355), True, 'import numpy as np\n'), ((2458, 2482), 'scipy.ndimage.label', 'ndi.label', (['local_maximum'], {}), '(local_maximum)\n', (2467, 2482), True, 'from scipy import ndimage as ndi\n'), ((2427, 2442), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2434, 2442), True, 'import numpy as np\n'), ((2784, 2807), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (2797, 2807), False, 'import os\n')] |
import cv2 as cv
import numpy as np
import scipy
import math
import copy
# import matplotlib
# #%matplotlib inline
# import pylab as plt
# import json
from PIL import Image
from shutil import copyfile
from skimage import img_as_float
from functools import reduce
from renderopenpose import *
import os
import sys
def makebox128(miny, maxy, minx, maxx, dimy=128, dimx=128):
    """Grow/clamp a bounding box to exactly ``dimy`` x ``dimx`` pixels.

    The box is widened symmetrically about its centre until it reaches the
    target size, then clamped so it stays inside the fixed frame bounds
    (rows <= 512, cols <= 1024 — these match the SIZE x 2*SIZE resized
    frame used by the caller; boxes touching an edge are pinned to it).

    Args:
        miny, maxy, minx, maxx: current box edges in pixels.
        dimy, dimx: required output box height and width.

    Returns:
        Tuple ``(miny, maxy, minx, maxx)`` describing a dimy x dimx box.
    """
    # Vertical axis: resize, then clamp to [0, 512].
    if maxy - miny != dimy:
        pad = dimy - (maxy - miny)
        maxy = maxy + (pad // 2)
        miny = maxy - dimy
    if maxy > 512:
        maxy = 512
        miny = 512 - dimy
    if miny < 0:
        miny = 0
        maxy = dimy
    # Horizontal axis: resize, then clamp to [0, 1024].
    if maxx - minx != dimx:
        pad = dimx - (maxx - minx)
        maxx = maxx + (pad // 2)
        minx = maxx - dimx
    if maxx > 1024:
        maxx = 1024
        minx = 1024 - dimx
    if minx < 0:
        minx = 0
        maxx = dimx
    return miny, maxy, minx, maxx
def get_faceboxes(keypoints_dir, frames_dir, save_dir, phase, start, end, step, myshape, SIZE, boxbuffer, debug=False):
  """Write a 128x128 face bounding box file for every usable frame.

  For each frame index in [start, end] (stride ``step``) whose OpenPose
  keypoint file contains a complete pose (69 values), the average face
  position is converted into a square box in the SIZE x 2*SIZE resized
  frame and written to ``<save_dir>/<phase>_facetexts128/frameNNNNNN.txt``
  as "miny maxy minx maxx".  With ``debug=True`` the crops are also saved
  as images for visual inspection.
  """
  # myshape = (1080, 1920, 3)
  numframesmade = 0
  # NOTE(review): the boxbuffer argument is immediately overwritten here,
  # so callers cannot actually change the crop radius — confirm intentional.
  boxbuffer = 70
  tary = SIZE       # target frame height after resizing
  tarx = 2 * SIZE   # target frame width (frames assumed 2:1 — TODO confirm)
  poselen = 69      # presumably 23 keypoints x (x, y, score); matches aveface_frompose23
  saveim = False    # NOTE(review): unused
  startx = 0
  endx = myshape[1]
  starty = 0
  endy = myshape[0]
  # Scale factors from original frame size to the resized frame.
  scaley = float(tary) / float(endy - starty)
  scalex = float(tarx) / float(endx - startx)
  if not os.path.exists(save_dir + '/saved_ims/'):
    os.makedirs(save_dir + '/saved_ims/')
  # NOTE(review): the facetexts dir is only created when debug=True, but the
  # loop below writes into it unconditionally — likely crashes when the dir
  # is missing and debug=False; verify with callers.
  if debug:
    if not os.path.exists(save_dir + '/'+ phase + '_facetexts128/'):
      os.makedirs(save_dir + '/' + phase + '_facetexts128/')
  n = start
  while n <= end:
    framesmadestr = '%06d' % numframesmade  # NOTE(review): unused
    string_num = '%06d' % n
    key_name = keypoints_dir + "/frame" + string_num
    posepts = readkeypointsfile(key_name + "_pose.yml")
    if len(posepts) != poselen:
      # Incomplete pose: skip this frame (step is intentionally 1 here).
      n += 1
      continue
    else:
      print("Getting face bounding box for " + key_name)
      # Average face position in original-frame pixel coordinates.
      ave = aveface_frompose23(posepts)
      avex = ave[0]
      avey = ave[1]
      # Box of radius `boxbuffer` around the face centre, clipped to the
      # frame and mapped into the resized-frame coordinate system.
      minx = int((max(avex - boxbuffer, startx) - startx) * scalex)
      miny = int((max(avey - boxbuffer, starty) - starty) * scaley)
      maxx = int((min(avex + boxbuffer, endx) - startx) * scalex)
      maxy = int((min(avey + boxbuffer, endy) - starty) * scaley)
      # Force the box to exactly 128x128 inside the resized frame.
      miny, maxy, minx, maxx = makebox128(miny, maxy, minx, maxx)
      myfile = save_dir + "/"+phase+"_facetexts128/frame" + string_num + '.txt'
      F = open(myfile, "w")
      F.write(str(miny) + " " + str(maxy) + " " + str(minx) + " " + str(maxx))
      F.close()
      if debug:
        # Save the full frame, the resized frame's face crop, for eyeballing.
        frame_name = frames_dir + '/frame' + string_num + ".png"
        if not os.path.isfile(frame_name):
          print('no such frame' + frame_name)
          sys.exit(0)
        else:
          oriImg = cv.imread(frame_name)
          bod = Image.fromarray(oriImg)
          bod.save(save_dir + '/saved_ims/' + 'frame_fUllbod' + string_num + '.png')
          oriImg = Image.fromarray(oriImg[starty:endy, startx:endx, :])
          oriImg = oriImg.resize((2*SIZE,SIZE), Image.ANTIALIAS)
          oriImg = np.array(oriImg)
          # [2,1,0] swaps BGR (cv2) to RGB for saving through PIL.
          oriImg = oriImg[miny:maxy, minx:maxx, [2,1,0]]
          oriImg = Image.fromarray(oriImg)
          oriImg.save(save_dir + '/saved_ims/' + 'frame' + string_num + '.png')
      numframesmade += 1
    n += step
| [
"os.path.exists",
"PIL.Image.fromarray",
"os.makedirs",
"os.path.isfile",
"numpy.array",
"sys.exit",
"cv2.imread"
] | [((1409, 1449), 'os.path.exists', 'os.path.exists', (["(save_dir + '/saved_ims/')"], {}), "(save_dir + '/saved_ims/')\n", (1423, 1449), False, 'import os\n'), ((1453, 1490), 'os.makedirs', 'os.makedirs', (["(save_dir + '/saved_ims/')"], {}), "(save_dir + '/saved_ims/')\n", (1464, 1490), False, 'import os\n'), ((1512, 1569), 'os.path.exists', 'os.path.exists', (["(save_dir + '/' + phase + '_facetexts128/')"], {}), "(save_dir + '/' + phase + '_facetexts128/')\n", (1526, 1569), False, 'import os\n'), ((1573, 1627), 'os.makedirs', 'os.makedirs', (["(save_dir + '/' + phase + '_facetexts128/')"], {}), "(save_dir + '/' + phase + '_facetexts128/')\n", (1584, 1627), False, 'import os\n'), ((2618, 2644), 'os.path.isfile', 'os.path.isfile', (['frame_name'], {}), '(frame_name)\n', (2632, 2644), False, 'import os\n'), ((2692, 2703), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2700, 2703), False, 'import sys\n'), ((2728, 2749), 'cv2.imread', 'cv.imread', (['frame_name'], {}), '(frame_name)\n', (2737, 2749), True, 'import cv2 as cv\n'), ((2761, 2784), 'PIL.Image.fromarray', 'Image.fromarray', (['oriImg'], {}), '(oriImg)\n', (2776, 2784), False, 'from PIL import Image\n'), ((2879, 2931), 'PIL.Image.fromarray', 'Image.fromarray', (['oriImg[starty:endy, startx:endx, :]'], {}), '(oriImg[starty:endy, startx:endx, :])\n', (2894, 2931), False, 'from PIL import Image\n'), ((3006, 3022), 'numpy.array', 'np.array', (['oriImg'], {}), '(oriImg)\n', (3014, 3022), True, 'import numpy as np\n'), ((3089, 3112), 'PIL.Image.fromarray', 'Image.fromarray', (['oriImg'], {}), '(oriImg)\n', (3104, 3112), False, 'from PIL import Image\n')] |
# Copyright (c) 2021. <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
from abc import ABC, abstractmethod
import numpy as np
from tqdm.auto import tqdm
class BaseLazyCoordinates(ABC):
    """Abstract base class for lazily transformed coordinate arrays.

    Subclasses implement the in-place arithmetic operators and
    ``__getitem__``; the matching out-of-place operators are derived
    here by shallow-copying the object and applying the in-place
    version, so transforms accumulate without materialising the
    underlying coordinates.
    """

    @property
    @abstractmethod
    def shape(self):
        """Shape ``(n_points, dim)`` of the (virtual) coordinate array."""
        raise NotImplementedError

    def __array__(self, dtype=None):
        # Materialise all coordinates with the accumulated transform applied.
        return np.asanyarray(self[:], dtype=dtype)

    @abstractmethod
    def __iadd__(self, other):
        raise NotImplementedError

    def __add__(self, other):
        new = copy.copy(self)
        new += other
        return new

    @abstractmethod
    def __isub__(self, other):
        raise NotImplementedError

    def __sub__(self, other):
        new = copy.copy(self)
        new -= other
        return new

    @abstractmethod
    def __imul__(self, other):
        raise NotImplementedError

    def __mul__(self, other):
        new = copy.copy(self)
        new *= other
        return new

    @abstractmethod
    def __itruediv__(self, other):
        raise NotImplementedError

    def __truediv__(self, other):
        new = copy.copy(self)
        new /= other
        return new

    @abstractmethod
    def __imatmul__(self, other):
        raise NotImplementedError

    def __matmul__(self, other):
        new = copy.copy(self)
        new @= other
        return new

    @abstractmethod
    def __getitem__(self, item):
        # Bug fix: this previously read ``return NotImplementedError``,
        # which silently handed the exception *class* back to any caller
        # reaching the abstract body; now it raises, consistent with the
        # other abstract methods above.
        raise NotImplementedError

    def __len__(self):
        return self.shape[0]
class LazyCoordinates(BaseLazyCoordinates):
    """Coordinates stored as a base array plus a lazy affine transform.

    Indexing returns ``x * scale @ rot + shift``; the base array ``x``
    itself is never modified.  The in-place operators only update the
    transform parameters.
    """

    def __init__(self, x, shift=None, scale=None, rot=None):
        self._x = x
        dim = self.shape[1]
        # Default transform is the identity: zero shift, unit scale, I rot.
        self.shift = (np.zeros((1, dim)) if shift is None
                      else np.array(shift).reshape((1, dim)))
        self.scale = 1 if scale is None else scale
        self.rot = np.eye(dim) if rot is None else np.array(rot)

    def save_transform(self, filename):
        """Persist the affine parameters (not the base coordinates)."""
        np.savez(filename, shift=self.shift, scale=self.scale, rot=self.rot)

    @property
    def shape(self):
        return self._x.shape

    def __copy__(self):
        clone = self.__class__(self._x, self.shift, self.scale, self.rot)
        # Carry over any extra attributes set on the instance.
        for name, value in self.__dict__.items():
            clone.__dict__.setdefault(name, value)
        return clone

    def __iadd__(self, other):
        self.shift += other
        return self

    def __isub__(self, other):
        self.shift -= other
        return self

    def __imul__(self, other):
        # Scaling also scales any shift already applied.
        self.scale *= other
        self.shift *= other
        return self

    def __itruediv__(self, other):
        self.scale /= other
        self.shift /= other
        return self

    def __imatmul__(self, other):
        # Rotation also rotates any shift already applied.
        self.rot = self.rot @ other
        self.shift = self.shift @ other
        return self

    def __getitem__(self, item):
        is_tuple = isinstance(item, tuple)
        rows = item[0] if is_tuple else item
        pts = self._x[rows] * self.scale
        pts = pts @ self.rot
        pts += self.shift
        if not is_tuple:
            return pts
        # Apply the remaining (column) selectors after the transform.
        if pts.ndim > 1:
            return pts[(slice(None), *item[1:])]
        return pts[item[1]]

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self._x)
class LazyFileCoordinates(LazyCoordinates):
    """Lazy coordinates backed by a ``.npy`` file on disk.

    Only the array header is parsed at construction time to learn the
    shape; the data itself is memory-mapped on demand.
    """

    def __init__(self, filename, *args, **kwargs):
        with open(filename, 'rb') as fh:
            _version = np.lib.format.read_magic(fh)
            header = np.lib.format.read_array_header_1_0(fh)
            self._shape = header[0]
        super().__init__(filename, *args, **kwargs)

    @property
    def _x(self):
        # Memory-map so large files are never loaded wholesale.
        return np.load(self.filename, mmap_mode='r')

    @_x.setter
    def _x(self, other):
        # The base class assigns the "array" here; we store the path instead.
        self.filename = other

    @property
    def shape(self):
        return self._shape

    def __copy__(self):
        clone = self.__class__(self.filename, self.shift, self.scale, self.rot)
        for name, value in self.__dict__.items():
            clone.__dict__.setdefault(name, value)
        return clone
class LazyMeanAggregatorCoordinates(BaseLazyCoordinates):
    """Lazily average the coordinates of nodes shared between patches.

    Each patch supplies coordinates for a subset of nodes; a node's
    aggregated coordinate is the mean over all patches containing it.
    Nothing is materialised until the object is indexed.
    """

    def __init__(self, patches):
        self.patches = []
        for patch in patches:
            if isinstance(patch.coordinates, LazyMeanAggregatorCoordinates):
                # flatten hierarchy
                self.patches.extend(patch.coordinates.patches)
            else:
                self.patches.append(patch)
        self.dim = patches[0].shape[1]
        # Sorted union of all node ids covered by any patch.
        self.nodes = set()
        for patch in patches:
            self.nodes.update(patch.nodes)
        self.nodes = np.array(sorted(self.nodes))

    @property
    def shape(self):
        return len(self.nodes), self.dim

    def __getitem__(self, item):
        if isinstance(item, tuple):
            item, *others = item
        else:
            others = ()
        nodes = self.nodes[item]
        out = self.get_coordinates(nodes)
        if others:
            # Apply remaining (column) selectors to the aggregated rows.
            return out[(slice(None), *others)]
        else:
            return out

    def __array__(self, dtype=None):
        # more efficient than the base-class per-item path
        out = np.zeros(self.shape, dtype=dtype)
        return self.as_array(out)

    def as_array(self, out=None):
        """Aggregate every node's mean coordinate into ``out``."""
        if out is None:
            out = np.zeros(self.shape)
        # Map node id -> row index in `out`.
        index = np.empty((self.nodes.max() + 1,), dtype=np.int64)
        index[self.nodes] = np.arange(len(self.nodes))
        # Bug fix: ``dtype=np.int`` was removed in NumPy 1.24; the builtin
        # ``int`` has always been its meaning and is forward-compatible.
        count = np.zeros((len(self.nodes),), dtype=int)
        for patch in tqdm(self.patches, desc='get full mean embedding'):
            nodes = patch.nodes
            out[index[nodes]] += patch.coordinates
            count[index[nodes]] += 1
        out /= count[:, None]
        return out

    def get_coordinates(self, nodes, out=None):
        """Mean coordinates for the given node ids across all patches."""
        nodes = np.asanyarray(nodes)
        if out is None:
            out = np.zeros((len(nodes), self.dim))
        # Bug fix: ``dtype=np.int`` removed in NumPy 1.24 -> builtin ``int``.
        count = np.zeros((len(nodes),), dtype=int)
        for patch in tqdm(self.patches, desc='get mean embedding'):
            index = [i for i, node in enumerate(nodes) if node in patch.index]
            count[index] += 1
            out[index] += patch.get_coordinates(nodes[index])
        out /= count[:, None]
        return out

    def __iadd__(self, other):
        for patch in self.patches:
            patch.coordinates += other
        return self

    def __isub__(self, other):
        for patch in self.patches:
            patch.coordinates -= other
        return self

    def __imul__(self, other):
        for patch in self.patches:
            patch.coordinates *= other
        return self

    def __itruediv__(self, other):
        for patch in self.patches:
            patch.coordinates /= other
        return self

    def __imatmul__(self, other):
        for patch in self.patches:
            patch.coordinates = patch.coordinates @ other
        return self

    def __copy__(self):
        new = self.__new__(type(self))
        new.__dict__.update(self.__dict__)
        # Copy the patch list so transforms on the copy leave the
        # originals untouched.
        new.patches = [copy.copy(patch) for patch in self.patches]
        return new

    def __repr__(self):
        return f'{self.__class__.__name__}({repr(self.patches)})'
| [
"numpy.savez",
"numpy.eye",
"numpy.asanyarray",
"numpy.lib.format.read_magic",
"numpy.array",
"numpy.zeros",
"numpy.lib.format.read_array_header_1_0",
"tqdm.auto.tqdm",
"copy.copy",
"numpy.load"
] | [((1374, 1409), 'numpy.asanyarray', 'np.asanyarray', (['self[:]'], {'dtype': 'dtype'}), '(self[:], dtype=dtype)\n', (1387, 1409), True, 'import numpy as np\n'), ((1541, 1556), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1550, 1556), False, 'import copy\n'), ((1728, 1743), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1737, 1743), False, 'import copy\n'), ((1915, 1930), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1924, 1930), False, 'import copy\n'), ((2110, 2125), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (2119, 2125), False, 'import copy\n'), ((2303, 2318), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (2312, 2318), False, 'import copy\n'), ((3058, 3126), 'numpy.savez', 'np.savez', (['filename'], {'shift': 'self.shift', 'scale': 'self.scale', 'rot': 'self.rot'}), '(filename, shift=self.shift, scale=self.scale, rot=self.rot)\n', (3066, 3126), True, 'import numpy as np\n'), ((4825, 4862), 'numpy.load', 'np.load', (['self.filename'], {'mmap_mode': '"""r"""'}), "(self.filename, mmap_mode='r')\n", (4832, 4862), True, 'import numpy as np\n'), ((6299, 6332), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'dtype'}), '(self.shape, dtype=dtype)\n', (6307, 6332), True, 'import numpy as np\n'), ((6666, 6716), 'tqdm.auto.tqdm', 'tqdm', (['self.patches'], {'desc': '"""get full mean embedding"""'}), "(self.patches, desc='get full mean embedding')\n", (6670, 6716), False, 'from tqdm.auto import tqdm\n'), ((6952, 6972), 'numpy.asanyarray', 'np.asanyarray', (['nodes'], {}), '(nodes)\n', (6965, 6972), True, 'import numpy as np\n'), ((7124, 7169), 'tqdm.auto.tqdm', 'tqdm', (['self.patches'], {'desc': '"""get mean embedding"""'}), "(self.patches, desc='get mean embedding')\n", (7128, 7169), False, 'from tqdm.auto import tqdm\n'), ((2707, 2725), 'numpy.zeros', 'np.zeros', (['(1, dim)'], {}), '((1, dim))\n', (2715, 2725), True, 'import numpy as np\n'), ((2946, 2957), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (2952, 
2957), True, 'import numpy as np\n'), ((2995, 3008), 'numpy.array', 'np.array', (['rot'], {}), '(rot)\n', (3003, 3008), True, 'import numpy as np\n'), ((4606, 4633), 'numpy.lib.format.read_magic', 'np.lib.format.read_magic', (['f'], {}), '(f)\n', (4630, 4633), True, 'import numpy as np\n'), ((4658, 4696), 'numpy.lib.format.read_array_header_1_0', 'np.lib.format.read_array_header_1_0', (['f'], {}), '(f)\n', (4693, 4696), True, 'import numpy as np\n'), ((6444, 6464), 'numpy.zeros', 'np.zeros', (['self.shape'], {}), '(self.shape)\n', (6452, 6464), True, 'import numpy as np\n'), ((8177, 8193), 'copy.copy', 'copy.copy', (['patch'], {}), '(patch)\n', (8186, 8193), False, 'import copy\n'), ((2765, 2780), 'numpy.array', 'np.array', (['shift'], {}), '(shift)\n', (2773, 2780), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import cal_joint_lps
import data_set_4
import mix_lp
def CalGamma(dataC, dataU, pD, pA, g1, g2, calc_type):
    """Mix joint log-probabilities for the two groups.

    With ``calc_type == 0`` only the combined component ``rt_cu`` is
    mixed with ``pA``; otherwise the unique component is mixed with
    ``pA``, added to the common component, and the sum mixed with ``pD``.
    """
    rt_c, rt_u, rt_cu = cal_joint_lps.CalJointLPS(dataC, dataU, g1, g2)
    if calc_type == 0:
        return mix_lp.MyGetMixLP2(rt_cu, pA)
    combined = rt_c + mix_lp.MyGetMixLP2(rt_u, pA)
    return mix_lp.MyGetMixLP2(combined, pD)
class CalGammaTestCase(unittest.TestCase):
    """Regression tests for CalGamma against the fixed data_set_4 fixture."""

    def test_CalGamma(self):
        dataC = data_set_4.dataC
        dataU = data_set_4.dataU
        # Test _calGamma_S (both groups fully included).
        g1 = [0, 1]
        g2 = [2, 3]
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 0)
        print('_calGamma_S:')
        print('res with cal_type = 0 -> {:3.5f} {}'.format(res, np.exp(res)))
        # assertAlmostEqual(delta=...) is equivalent to the original
        # assertFalse(abs(...) > 1e-5) but reports both values on failure.
        self.assertAlmostEqual(res, 28.58330, delta=1e-5)
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 1)
        print('res with cal_type = 1 -> {:3.5f} {}'.format(res, np.exp(res)))
        self.assertAlmostEqual(res, -0.10349, delta=1e-5)
        print('')
        # Test _calGamma_E (element `2' excluded from g2).
        g1 = [0, 1]
        g2 = [3]
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 0)
        print('_calGamma_E:')
        print('res with cal_type = 0 -> {:3.5f} {}'.format(res, np.exp(res)))
        self.assertAlmostEqual(res, 30.52722, delta=1e-5)
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 1)
        print('res with cal_type = 1 -> {:3.5f} {}'.format(res, np.exp(res)))
        self.assertAlmostEqual(res, -0.0524233, delta=1e-5)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.exp",
"mix_lp.MyGetMixLP2",
"cal_joint_lps.CalJointLPS"
] | [((172, 219), 'cal_joint_lps.CalJointLPS', 'cal_joint_lps.CalJointLPS', (['dataC', 'dataU', 'g1', 'g2'], {}), '(dataC, dataU, g1, g2)\n', (197, 219), False, 'import cal_joint_lps\n'), ((363, 389), 'mix_lp.MyGetMixLP2', 'mix_lp.MyGetMixLP2', (['lp', 'pD'], {}), '(lp, pD)\n', (381, 389), False, 'import mix_lp\n'), ((1609, 1624), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1622, 1624), False, 'import unittest\n'), ((258, 287), 'mix_lp.MyGetMixLP2', 'mix_lp.MyGetMixLP2', (['rt_cu', 'pA'], {}), '(rt_cu, pA)\n', (276, 287), False, 'import mix_lp\n'), ((324, 352), 'mix_lp.MyGetMixLP2', 'mix_lp.MyGetMixLP2', (['rt_u', 'pA'], {}), '(rt_u, pA)\n', (342, 352), False, 'import mix_lp\n'), ((773, 784), 'numpy.exp', 'np.exp', (['res'], {}), '(res)\n', (779, 784), True, 'import numpy as np\n'), ((969, 980), 'numpy.exp', 'np.exp', (['res'], {}), '(res)\n', (975, 980), True, 'import numpy as np\n'), ((1311, 1322), 'numpy.exp', 'np.exp', (['res'], {}), '(res)\n', (1317, 1322), True, 'import numpy as np\n'), ((1507, 1518), 'numpy.exp', 'np.exp', (['res'], {}), '(res)\n', (1513, 1518), True, 'import numpy as np\n')] |
import os
import time
import pickle
import random
import numpy as np
import tensorflow as tf
import sys
from input import DataInput, DataInputTest
from model import Model
import argparse
# Command-line interface: only the inference batch size is configurable.
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=256, help="inference batch size")
args = parser.parse_args()
# Fix all RNG seeds for reproducible runs.
random.seed(1234)
np.random.seed(1234)
tf.set_random_seed(1234)
predict_batch_size = args.batch_size
predict_ads_num = 1  # presumably the number of ads scored per request — confirm against Model
# dataset.pkl is expected to contain, in order: train set, test set,
# item->category lookup, then (user_count, item_count, cate_count).
with open('dataset.pkl', 'rb') as f:
  train_set = pickle.load(f)
  test_set = pickle.load(f)
  cate_list = pickle.load(f)
  user_count, item_count, cate_count = pickle.load(f)
best_auc = 0.0  # NOTE(review): unused in this inference script — TODO confirm
def _auc_arr(score):
score_p = score[:,0]
score_n = score[:,1]
#print "============== p ============="
#print score_p
#print "============== n ============="
#print score_n
score_arr = []
for s in score_p.tolist():
score_arr.append([0, 1, s])
for s in score_n.tolist():
score_arr.append([1, 0, s])
return score_arr
def _test(sess, model):
  """Benchmark model.test over the test set and print throughput stats."""
  print('Round Batch size Recommendations / sec')
  total_time = 0
  perf_total = []
  score_append = np.empty((predict_batch_size, predict_ads_num, 1), float)
  iteration = 0
  # Warm-up: run five batches (and dump their scores) before timing starts.
  for _, uij in DataInputTest(test_set, predict_batch_size):
    score_ = model.test(sess,uij)
    score_append = np.append(score_append, score_, axis = 0)
    iteration += 1
    if iteration == 5:
      np.save('inference_' + str(predict_batch_size) +'.npy', score_append)
      break
  # Timed pass over the full test set.
  time_st = time.time()
  iteration = 0
  for _, uij in DataInputTest(test_set, predict_batch_size):
    if len(uij[0]) != predict_batch_size:
      # Stop at the final partial batch so per-batch timings are comparable.
      break
    s_time = time.time()
    score_ = model.test(sess, uij)
    e_time = time.time()
    total_time += e_time - s_time
    iteration += 1
    if iteration % 1000 == 0:
      # Periodic progress report: running throughput since the timer start.
      time_dur = time.time() - time_st
      perf = predict_batch_size * iteration / time_dur
      print(' %2i %4i %10.1f' % (iteration, predict_batch_size, perf ))
      # break
  # elif iteration % 100 == 0:
  #   time_dur = time.time() - time_st
  #   perf = predict_batch_size * iteration / time_dur
  #   print(' %2i %4i %10.1f' % (iteration, predict_batch_size, perf ))
  time_dur = time.time() - time_st
  perf = predict_batch_size * iteration / time_dur
  print("Average performance is %10.1f for batch size=" % perf, predict_batch_size)
  print("Total recommendations: %d" % len(test_set))
  print("Approximate accelerator time in seconds: %.3f" % total_time)
  print("Approximate accelerator performance in recommendations/second: %.3f" % (len(test_set)/total_time))
# Run inference on a single GPU with dynamic memory growth enabled;
# allow_soft_placement lets ops without a GPU kernel fall back to CPU.
devices = ['/gpu:0']
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
  model = Model(user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num, 1, devices)
  # Load trained weights from the checkpoint, then run the benchmark.
  model.restore(sess, 'save_path/ckpt')
  _test(sess, model)
| [
"input.DataInputTest",
"model.Model",
"argparse.ArgumentParser",
"pickle.load",
"random.seed",
"numpy.append",
"numpy.empty",
"numpy.random.seed",
"time.time",
"tensorflow.ConfigProto",
"tensorflow.set_random_seed",
"tensorflow.GPUOptions"
] | [((197, 222), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (220, 222), False, 'import argparse\n'), ((339, 356), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (350, 356), False, 'import random\n'), ((357, 377), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (371, 377), True, 'import numpy as np\n'), ((378, 402), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (396, 402), True, 'import tensorflow as tf\n'), ((2698, 2730), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (2711, 2730), True, 'import tensorflow as tf\n'), ((514, 528), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (525, 528), False, 'import pickle\n'), ((542, 556), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (553, 556), False, 'import pickle\n'), ((571, 585), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (582, 585), False, 'import pickle\n'), ((625, 639), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (636, 639), False, 'import pickle\n'), ((1136, 1193), 'numpy.empty', 'np.empty', (['(predict_batch_size, predict_ads_num, 1)', 'float'], {}), '((predict_batch_size, predict_ads_num, 1), float)\n', (1144, 1193), True, 'import numpy as np\n'), ((1238, 1281), 'input.DataInputTest', 'DataInputTest', (['test_set', 'predict_batch_size'], {}), '(test_set, predict_batch_size)\n', (1251, 1281), False, 'from input import DataInput, DataInputTest\n'), ((1538, 1549), 'time.time', 'time.time', ([], {}), '()\n', (1547, 1549), False, 'import time\n'), ((1582, 1625), 'input.DataInputTest', 'DataInputTest', (['test_set', 'predict_batch_size'], {}), '(test_set, predict_batch_size)\n', (1595, 1625), False, 'from input import DataInput, DataInputTest\n'), ((2841, 2946), 'model.Model', 'Model', (['user_count', 'item_count', 'cate_count', 'cate_list', 'predict_batch_size', 'predict_ads_num', '(1)', 'devices'], {}), '(user_count, item_count, cate_count, 
cate_list, predict_batch_size,\n predict_ads_num, 1, devices)\n', (2846, 2946), False, 'from model import Model\n'), ((1336, 1375), 'numpy.append', 'np.append', (['score_append', 'score_'], {'axis': '(0)'}), '(score_append, score_, axis=0)\n', (1345, 1375), True, 'import numpy as np\n'), ((1696, 1707), 'time.time', 'time.time', ([], {}), '()\n', (1705, 1707), False, 'import time\n'), ((1756, 1767), 'time.time', 'time.time', ([], {}), '()\n', (1765, 1767), False, 'import time\n'), ((2274, 2285), 'time.time', 'time.time', ([], {}), '()\n', (2283, 2285), False, 'import time\n'), ((2754, 2820), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'allow_soft_placement': '(True)'}), '(gpu_options=gpu_options, allow_soft_placement=True)\n', (2768, 2820), True, 'import tensorflow as tf\n'), ((1869, 1880), 'time.time', 'time.time', ([], {}), '()\n', (1878, 1880), False, 'import time\n')] |
#!/usr/bin/env python
import os
import time
import numpy as np
import pyscf
from pyscf.pbc.dft import multigrid
log = pyscf.lib.logger.Logger(verbose=5)
with open('/proc/cpuinfo') as f:
for line in f:
if 'model name' in line:
log.note(line[:-1])
break
with open('/proc/meminfo') as f:
log.note(f.readline()[:-1])
log.note('OMP_NUM_THREADS=%s\n', os.environ.get('OMP_NUM_THREADS', None))
boxlen = 12.4138
cell0 = pyscf.M(a = np.eye(3) * boxlen,
atom = """
O 12.235322 1.376642 10.869880
O 6.445390 3.706940 8.650794
O 0.085977 2.181322 8.276663
O 12.052554 2.671366 2.147199
O 12.250036 4.190930 12.092014
O 7.187422 0.959062 4.733469
O 8.346457 7.210040 4.667644
O 12.361546 11.527875 8.106887
O 3.299984 4.440816 9.193275
O 2.855829 3.759909 6.552815
O 1.392494 6.362753 0.586172
O 1.858645 8.694013 2.068738
O 3.770231 12.094519 8.652183
O 6.432508 3.669828 2.772418
O 1.998724 1.820217 4.876440
O 8.248581 2.404730 6.931303
O 5.753814 3.360029 12.461534
O 11.322212 5.649239 2.236798
O 4.277318 2.113956 10.590808
O 5.405015 3.349247 5.484702
O 6.493278 11.869958 0.684912
O 3.275250 2.346576 2.425241
O 7.981003 6.352512 7.507970
O 5.985990 6.512854 12.194648
O 10.636714 11.856872 12.209540
O 9.312283 3.670384 3.508594
O 1.106885 5.830301 6.638695
O 8.008007 3.326363 10.869818
O 12.403000 9.687405 11.761901
O 4.219782 7.085315 8.153470
O 3.781557 8.203821 11.563272
O 11.088898 4.532081 7.809475
O 10.387548 8.408890 1.017882
O 1.979016 6.418091 10.374159
O 4.660547 0.549666 5.617403
O 8.745880 12.256257 8.089383
O 2.662041 10.489890 0.092980
O 7.241661 10.471815 4.226946
O 2.276827 0.276647 10.810417
O 8.887733 0.946877 1.333885
O 1.943554 8.088552 7.567650
O 9.667942 8.056759 9.868847
O 10.905491 8.339638 6.484782
O 3.507733 4.862402 1.557439
O 8.010457 8.642846 12.055969
O 8.374446 10.035932 6.690309
O 5.635247 6.076875 5.563993
O 11.728434 1.601906 5.079475
O 9.771134 9.814114 3.548703
O 3.944355 10.563450 4.687536
O 0.890357 6.382287 4.065806
O 6.862447 6.425182 2.488202
O 3.813963 6.595122 3.762649
O 6.562448 8.295463 8.807182
O 9.809455 0.143325 3.886553
O 4.117074 11.661225 2.221679
O 5.295317 8.735561 2.763183
O 9.971999 5.379339 5.340378
O 12.254708 8.643874 3.957116
O 2.344274 10.761274 6.829162
O 7.013416 0.643488 10.518797
O 5.152349 10.233624 10.359388
O 11.184278 5.884064 10.298279
O 12.252335 8.974142 9.070831
H 12.415139 2.233125 11.257611
H 11.922476 1.573799 9.986994
H 5.608192 3.371543 8.971482
H 6.731226 3.060851 8.004962
H -0.169205 1.565594 7.589645
H -0.455440 2.954771 8.118939
H 12.125168 2.826463 1.205443
H 12.888828 2.969761 2.504745
H 11.553255 4.386613 11.465566
H 12.818281 4.960808 12.067151
H 7.049495 1.772344 4.247898
H 6.353019 0.798145 5.174047
H 7.781850 7.384852 5.420566
H 9.103203 6.754017 5.035898
H 12.771232 11.788645 8.931744
H 12.018035 10.650652 8.276334
H 3.557245 3.792529 9.848846
H 2.543844 4.884102 9.577958
H 2.320235 4.521250 6.329813
H 2.872128 3.749963 7.509824
H 1.209685 7.121391 1.140501
H 2.238885 6.038801 0.894245
H 2.763109 8.856353 2.336735
H 1.329379 9.047369 2.783755
H 4.315639 11.533388 9.203449
H 3.098742 12.433043 9.244412
H 5.987369 3.448974 3.590530
H 5.813096 3.419344 2.086985
H 1.057126 1.675344 4.969379
H 2.248496 2.292119 5.670892
H 8.508264 1.653337 7.464411
H 8.066015 2.034597 6.067646
H 5.197835 2.915542 11.821572
H 6.630900 3.329981 12.079371
H 10.788986 6.436672 2.127933
H 11.657923 5.463602 1.359832
H 3.544476 1.634958 10.977765
H 4.755770 1.455054 10.087655
H 4.465371 3.375459 5.665294
H 5.682663 4.264430 5.524498
H 6.174815 11.778676 1.582954
H 5.713640 12.089924 0.174999
H 3.476076 1.498708 2.028983
H 2.730229 2.134295 3.182949
H 7.119624 5.936450 7.474030
H 8.536492 5.799405 6.958665
H 5.909499 5.717477 11.667621
H 6.125402 6.196758 13.087330
H 11.203499 12.513536 11.804844
H 10.260930 12.300153 12.970145
H 9.985036 3.927685 2.878172
H 8.545584 3.468329 2.972331
H 1.399882 6.620092 7.093246
H 0.963561 6.112523 5.735345
H 8.067363 3.674002 9.979955
H 8.000737 2.375959 10.756190
H 11.821629 10.402510 12.020482
H 12.206854 8.983242 12.379892
H 3.461473 7.606485 7.889688
H 3.844478 6.304711 8.560946
H 3.179884 7.585614 11.148494
H 4.401957 7.652030 12.039573
H 11.573777 5.053211 7.169515
H 10.342076 4.186083 7.320831
H 10.065640 8.919194 1.760981
H 9.629585 8.322499 0.439729
H 1.396302 6.546079 9.625630
H 1.405516 6.479759 11.138049
H 4.024008 1.232518 5.405828
H 4.736858 0.579881 6.571077
H 9.452293 12.313381 8.732772
H 8.976559 11.502788 7.545965
H 1.834701 10.012311 0.153462
H 3.295197 9.836403 -0.204175
H 7.056724 11.401702 4.095264
H 6.499038 10.020287 3.825865
H 1.365541 0.487338 11.013887
H 2.501591 -0.428131 11.417871
H 8.644279 1.812362 1.005409
H 8.142674 0.388030 1.112955
H 1.272659 8.365063 8.191888
H 2.142485 8.877768 7.063867
H 8.961493 7.826192 9.265523
H 9.227102 8.487654 10.601118
H 10.150144 7.758934 6.392768
H 10.596082 9.187988 6.167290
H 3.463106 4.096188 2.129414
H 3.919461 4.539801 0.755791
H 7.418998 9.394959 12.028876
H 7.430413 7.883095 12.106546
H 7.972905 10.220334 5.841196
H 7.675111 9.631498 7.203725
H 5.332446 6.381336 6.419473
H 5.000025 6.434186 4.943466
H 11.575078 2.271167 4.412540
H 11.219802 0.847030 4.783357
H 8.865342 9.721516 3.843998
H 10.000732 10.719285 3.758898
H 3.186196 10.476397 5.265333
H 4.407331 11.335128 5.013723
H 0.558187 7.255936 3.859331
H 0.341672 5.789383 3.552346
H 7.459933 6.526049 3.229193
H 6.696228 5.483739 2.440372
H 3.864872 6.313007 2.849385
H 2.876419 6.621201 3.953862
H 5.631529 8.079145 8.753997
H 7.003296 7.568245 8.367822
H 9.615413 0.527902 3.031755
H 8.962985 0.109366 4.332162
H 3.825854 11.139182 1.474087
H 4.063988 11.063232 2.967211
H 5.784391 7.914558 2.708486
H 4.780461 8.655167 3.566110
H 10.880659 5.444664 5.046607
H 9.593331 4.687991 4.797350
H 11.562317 8.960134 3.376765
H 11.926084 8.816948 4.839320
H 2.856874 11.297981 7.433660
H 1.492332 11.195517 6.786033
H 7.145820 0.090200 9.749009
H 7.227275 0.077690 11.260665
H 4.662021 9.538430 10.798155
H 5.994537 9.833472 10.142985
H 10.544299 6.595857 10.301445
H 11.281750 5.653082 9.374494
H 12.103020 8.841164 10.006916
H 11.491592 8.576221 8.647557
""",
basis = 'gth-tzv2p',
pseudo = 'gth-pade',
max_memory = 50000,
precision = 1e-6)
for xc in ('lsda', 'pbe'):
for images in ([1,1,1], [2,1,1], [2,2,1], [2,2,2]):
cell = pbc.tools.super_cell(cell0, images)
nao = cell.nao
log.note('nao = %d', nao)
dm = np.random.random((nao,nao))
dm = dm + dm.T
mf = cell.RKS().set(xc=xc)
mf.with_df = multigrid.MultiGridFFTDF(cell)
cpu0 = time.clock(), time.time()
v = mf.get_veff(cell, dm)
log.timer('Fock build (xc=%s, nao=%d)' % (xc, nao), *cpu0)
| [
"numpy.eye",
"pyscf.pbc.dft.multigrid.MultiGridFFTDF",
"time.clock",
"numpy.random.random",
"os.environ.get",
"pyscf.lib.logger.Logger",
"time.time"
] | [((120, 154), 'pyscf.lib.logger.Logger', 'pyscf.lib.logger.Logger', ([], {'verbose': '(5)'}), '(verbose=5)\n', (143, 154), False, 'import pyscf\n'), ((388, 427), 'os.environ.get', 'os.environ.get', (['"""OMP_NUM_THREADS"""', 'None'], {}), "('OMP_NUM_THREADS', None)\n", (402, 427), False, 'import os\n'), ((9893, 9921), 'numpy.random.random', 'np.random.random', (['(nao, nao)'], {}), '((nao, nao))\n', (9909, 9921), True, 'import numpy as np\n'), ((10001, 10031), 'pyscf.pbc.dft.multigrid.MultiGridFFTDF', 'multigrid.MultiGridFFTDF', (['cell'], {}), '(cell)\n', (10025, 10031), False, 'from pyscf.pbc.dft import multigrid\n'), ((468, 477), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (474, 477), True, 'import numpy as np\n'), ((10048, 10060), 'time.clock', 'time.clock', ([], {}), '()\n', (10058, 10060), False, 'import time\n'), ((10062, 10073), 'time.time', 'time.time', ([], {}), '()\n', (10071, 10073), False, 'import time\n')] |
#!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#Functions for calculating delta in and out degree from CRC output edge tables
#First commit with some hard paths
#==========================================================================
#=============================DEPENDENCIES=================================
#==========================================================================
#requires bamliquidator and the linlab pipeline installed
#runs on an edge table from CRC output
import sys, os
# Get the script's full local path
whereAmI = os.path.dirname(os.path.realpath(__file__))
pipeline_dir = whereAmI.replace('/crc','')
print(pipeline_dir)
sys.path.append(whereAmI)
sys.path.append(pipeline_dir)
import pipeline_dfci
import utils
import string
import numpy
from scipy import stats
import os
import re
from collections import defaultdict
#==========================================================================
#============================PARAMETERS====================================
#==========================================================================
#hard coded parameters can go here during debug
#crc_folder <- standard CRC output
#chip_dataFile <- a linlab format data table w/ chip data to be mapped at edges
#analysis_name <- analysis name used in CRC. Typical CRC output files will include [ANALYSIS_NAME]_EDGE_TABLE.txt e.g. if NIBR_EDGE_TABLE.txt then analysis_name = NIBR
#group1_list,group2_list <- names of datasets in each group (specified in the chip_dataFile)
#output = path to write to. default will write to the CRC folder
#==========================================================================
#============================LIST OF DATAFILES=============================
#==========================================================================
#Example data file for a chip dataset
#ChIP-Seq
chip_dataFile = '%sdata_tables/NIBR_CHIP_TABLE.txt' % (projectFolder)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~CALCULATING CHANGES IN BRD4 OUT DEGREE BY TF EDGES~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tf_edge_delta_out(crc_folder,chip_dataFile,analysis_name,group1_list,group2_list,output=''):
'''
calculates changes in brd4 out degree at each predicted motif occurrence this is by subpeaks
'''
crc_folder = utils.formatFolder(crc_folder,False)
edge_path = '%s%s_EDGE_TABLE.txt' % (crc_folder,analysis_name)
#make a gff of the edge table
edge_table = utils.parseTable(edge_path,'\t')
edge_gff = []
for line in edge_table[1:]:
gff_line = [line[2],'%s_%s' % (line[0],line[1]),'',line[3],line[4],'','.','','%s_%s' % (line[0],line[1])]
edge_gff.append(gff_line)
edge_gff_path = '%s%s_EDGE_TABLE.gff' % (crc_folder,analysis_name)
utils.unParseTable(edge_gff,edge_gff_path,'\t')
#direct the output to the crc folder
signal_path = '%s%s_EDGE_TABLE_signal.txt' % (crc_folder,analysis_name)
#get a list of all chip datasets
all_chip_list = group1_list + group2_list
if utils.checkOutput(signal_path,0,0) == False:
signal_table_list = pipeline_dfci.map_regions(chip_dataFile,[edge_gff_path],mappedFolder,signalFolder,all_chip_list,True,signal_path,extendReadsTo=100)
print(signal_table_list)
else:
print('Found previous signal table at %s' % (signal_path))
#now bring in the signal table as a dictionary using the locus line as the id
print('making log2 group1 over group2 signal table at edges')
signal_table = utils.parseTable(signal_path,'\t')
signal_dict = defaultdict(float)
#figure out columns for group1 and group2
group2_columns = [signal_table[0].index(name) for name in group2_list]
group1_columns = [signal_table[0].index(name) for name in group1_list]
group2_signal_vector = []
group1_signal_vector = []
for line in signal_table[1:]:
group2_signal = numpy.mean([float(line[col]) for col in group2_columns])
group1_signal = numpy.mean([float(line[col]) for col in group1_columns])
group2_signal_vector.append(group2_signal)
group1_signal_vector.append(group1_signal)
group2_median = numpy.median(group2_signal_vector)
group1_median = numpy.median(group1_signal_vector)
print('group2 median signal (rpm/bp)')
print(group2_median)
print('group1 median signal (rpm/bp)')
print(group1_median)
#now that we have the median, we can take edges where at least 1 edge is above the median
#and both are above zero and generate a new table w/ the fold change
signal_filtered_path = string.replace(signal_path,'.txt','_filtered.txt')
if utils.checkOutput(signal_filtered_path,0,0):
print('Found filtered signal table for edges at %s' % (signal_filtered_path))
signal_table_filtered = utils.parseTable(signal_filtered_path,'\t')
else:
signal_table_filtered = [signal_table[0]+['GROUP2_MEAN','GROUP1_MEAN','LOG2_GROUP1_OVER_GROUP2']]
for line in signal_table[1:]:
group2_signal = numpy.mean([float(line[col]) for col in group2_columns])
group1_signal = numpy.mean([float(line[col]) for col in group1_columns])
if (group2_signal > group2_median or group1_signal > group1_median) and min(group2_signal,group1_signal) >0:
delta = numpy.log2(group1_signal/group2_signal)
new_line = line + [group2_signal,group1_signal,delta]
signal_table_filtered.append(new_line)
utils.unParseTable(signal_table_filtered,signal_filtered_path,'\t')
#now get a list of all TFs in the system
tf_list = utils.uniquify([line[0].split('_')[0] for line in signal_table_filtered[1:]])
tf_list.sort()
print(tf_list)
out_degree_table = [['TF_NAME','EDGE_COUNT','DELTA_MEAN','DELTA_MEDIAN','DELTA_STD','DELTA_SEM']]
for tf_name in tf_list:
print(tf_name)
edge_vector = [float(line[-1]) for line in signal_table_filtered[1:] if line[0].split('_')[0] == tf_name]
edge_count = len(edge_vector)
delta_mean = round(numpy.mean(edge_vector),4)
delta_median = round(numpy.median(edge_vector),4)
delta_std = round(numpy.std(edge_vector),4)
delta_sem = round(stats.sem(edge_vector),4)
tf_out_line = [tf_name,edge_count,delta_mean,delta_median,delta_std,delta_sem]
out_degree_table.append(tf_out_line)
if output == '':
#set final output
output_path = '%s%s_EDGE_DELTA_OUT.txt' % (crc_folder,analysis_name)
else:
output_path = output
utils.unParseTable(out_degree_table,output_path,'\t')
print(output_path)
return(output_path)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~CALCULATING CHANGES IN BRD4 IN DEGREE AT TFS~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tf_edge_delta_in(crc_folder,chip_dataFile,analysis_name,group1_list,group2_list,output=''):
'''
calculates changes in BRD4 at in degree edges
'''
crc_folder = utils.formatFolder(crc_folder,False)
enhancer_tf_path = '%s%s_ENHANCER_TF_TABLE.txt' % (crc_folder,analysis_name)
#make a gff of the edge table
enhancer_tf_table = utils.parseTable(enhancer_tf_path,'\t')
enhancer_tf_gff = []
for line in enhancer_tf_table[1:]:
gff_line = [line[1],line[0],'',line[2],line[3],'','.','',line[0]]
enhancer_tf_gff.append(gff_line)
enhancer_tf_gff_path = '%s%s_ENHANCER_TF_TABLE.gff' % (crc_folder,analysis_name)
utils.unParseTable(enhancer_tf_gff,enhancer_tf_gff_path,'\t')
#direct the output to the crc folder
signal_path = '%s%s_ENHANCER_TF_TABLE_signal.txt' % (crc_folder,analysis_name)
all_chip_list = group1_list + group2_list
if utils.checkOutput(signal_path,0,0) == False:
signal_table_list = pipeline_dfci.map_regions(chip_dataFile,[enhancer_tf_gff_path],mappedFolder,signalFolder,all_chip_list,True,signal_path,extendReadsTo=100)
print(signal_table_list)
else:
print('Found previous signal table at %s' % (signal_path))
#now bring in the signal table as a dictionary using the locus line as the id
print('making an enhancer signal dict')
signal_table = utils.parseTable(signal_path,'\t')
group1_signal_dict = defaultdict(float)
group2_signal_dict = defaultdict(float)
#signal here is calculated as AUC
#figure out columns for group2 and group1
group2_columns = [signal_table[0].index(name) for name in group2_list]
group1_columns = [signal_table[0].index(name) for name in group1_list]
for line in signal_table[1:]:
region_coords = [int(x) for x in line[1].split(':')[-1].split('-')]
region_length = region_coords[1] - region_coords[0]
group2_signal = region_length*numpy.mean([float(line[col]) for col in group2_columns])
group1_signal = region_length*numpy.mean([float(line[col]) for col in group1_columns])
group1_signal_dict[line[0]] = group1_signal
group2_signal_dict[line[0]] = group2_signal
#now grab the gene table
gene_tf_path = '%s%s_GENE_TF_TABLE.txt' % (crc_folder,analysis_name)
gene_tf_table = utils.parseTable(gene_tf_path,'\t')
group1_tf_dict = defaultdict(float)
group2_tf_dict = defaultdict(float)
for line in gene_tf_table[1:]:
group1_tf_dict[line[0]] += group1_signal_dict[line[-1]]
group2_tf_dict[line[0]] += group2_signal_dict[line[-1]]
tf_list = utils.uniquify([line[0] for line in gene_tf_table[1:]])
tf_list.sort()
print(tf_list)
in_degree_table = [['TF_NAME','GROUP1_IN','GROUP2_IN','LOG2_GROUP1_vs_GROUP2']]
for tf_name in tf_list:
group1_signal = round(group1_tf_dict[tf_name],4)
group2_signal = round(group2_tf_dict[tf_name],4)
delta = round(numpy.log2(group1_signal/group2_signal),4)
new_line = [tf_name,group1_signal,group2_signal,delta]
in_degree_table.append(new_line)
if output == '':
#set final output
output_path = '%s%s_TF_DELTA_IN.txt' % (crc_folder,analysis_name)
else:
output_path = output
utils.unParseTable(in_degree_table,output_path,'\t')
print(output_path)
return(output_path)
| [
"numpy.mean",
"numpy.median",
"string.replace",
"utils.checkOutput",
"utils.uniquify",
"numpy.log2",
"utils.formatFolder",
"os.path.realpath",
"collections.defaultdict",
"pipeline_dfci.map_regions",
"scipy.stats.sem",
"numpy.std",
"utils.unParseTable",
"utils.parseTable",
"sys.path.appen... | [((1716, 1741), 'sys.path.append', 'sys.path.append', (['whereAmI'], {}), '(whereAmI)\n', (1731, 1741), False, 'import sys, os\n'), ((1742, 1771), 'sys.path.append', 'sys.path.append', (['pipeline_dir'], {}), '(pipeline_dir)\n', (1757, 1771), False, 'import sys, os\n'), ((1623, 1649), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1639, 1649), False, 'import os\n'), ((3452, 3489), 'utils.formatFolder', 'utils.formatFolder', (['crc_folder', '(False)'], {}), '(crc_folder, False)\n', (3470, 3489), False, 'import utils\n'), ((3608, 3641), 'utils.parseTable', 'utils.parseTable', (['edge_path', '"""\t"""'], {}), "(edge_path, '\\t')\n", (3624, 3641), False, 'import utils\n'), ((3915, 3964), 'utils.unParseTable', 'utils.unParseTable', (['edge_gff', 'edge_gff_path', '"""\t"""'], {}), "(edge_gff, edge_gff_path, '\\t')\n", (3933, 3964), False, 'import utils\n'), ((4667, 4702), 'utils.parseTable', 'utils.parseTable', (['signal_path', '"""\t"""'], {}), "(signal_path, '\\t')\n", (4683, 4702), False, 'import utils\n'), ((4720, 4738), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (4731, 4738), False, 'from collections import defaultdict\n'), ((5328, 5362), 'numpy.median', 'numpy.median', (['group2_signal_vector'], {}), '(group2_signal_vector)\n', (5340, 5362), False, 'import numpy\n'), ((5383, 5417), 'numpy.median', 'numpy.median', (['group1_signal_vector'], {}), '(group1_signal_vector)\n', (5395, 5417), False, 'import numpy\n'), ((5752, 5804), 'string.replace', 'string.replace', (['signal_path', '""".txt"""', '"""_filtered.txt"""'], {}), "(signal_path, '.txt', '_filtered.txt')\n", (5766, 5804), False, 'import string\n'), ((5810, 5855), 'utils.checkOutput', 'utils.checkOutput', (['signal_filtered_path', '(0)', '(0)'], {}), '(signal_filtered_path, 0, 0)\n', (5827, 5855), False, 'import utils\n'), ((7748, 7803), 'utils.unParseTable', 'utils.unParseTable', (['out_degree_table', 'output_path', '"""\t"""'], 
{}), "(out_degree_table, output_path, '\\t')\n", (7766, 7803), False, 'import utils\n'), ((8261, 8298), 'utils.formatFolder', 'utils.formatFolder', (['crc_folder', '(False)'], {}), '(crc_folder, False)\n', (8279, 8298), False, 'import utils\n'), ((8438, 8478), 'utils.parseTable', 'utils.parseTable', (['enhancer_tf_path', '"""\t"""'], {}), "(enhancer_tf_path, '\\t')\n", (8454, 8478), False, 'import utils\n'), ((8747, 8810), 'utils.unParseTable', 'utils.unParseTable', (['enhancer_tf_gff', 'enhancer_tf_gff_path', '"""\t"""'], {}), "(enhancer_tf_gff, enhancer_tf_gff_path, '\\t')\n", (8765, 8810), False, 'import utils\n'), ((9465, 9500), 'utils.parseTable', 'utils.parseTable', (['signal_path', '"""\t"""'], {}), "(signal_path, '\\t')\n", (9481, 9500), False, 'import utils\n'), ((9525, 9543), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (9536, 9543), False, 'from collections import defaultdict\n'), ((9569, 9587), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (9580, 9587), False, 'from collections import defaultdict\n'), ((10420, 10456), 'utils.parseTable', 'utils.parseTable', (['gene_tf_path', '"""\t"""'], {}), "(gene_tf_path, '\\t')\n", (10436, 10456), False, 'import utils\n'), ((10477, 10495), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (10488, 10495), False, 'from collections import defaultdict\n'), ((10517, 10535), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (10528, 10535), False, 'from collections import defaultdict\n'), ((10719, 10774), 'utils.uniquify', 'utils.uniquify', (['[line[0] for line in gene_tf_table[1:]]'], {}), '([line[0] for line in gene_tf_table[1:]])\n', (10733, 10774), False, 'import utils\n'), ((11395, 11449), 'utils.unParseTable', 'utils.unParseTable', (['in_degree_table', 'output_path', '"""\t"""'], {}), "(in_degree_table, output_path, '\\t')\n", (11413, 11449), False, 'import utils\n'), ((4180, 4216), 'utils.checkOutput', 
'utils.checkOutput', (['signal_path', '(0)', '(0)'], {}), '(signal_path, 0, 0)\n', (4197, 4216), False, 'import utils\n'), ((4253, 4395), 'pipeline_dfci.map_regions', 'pipeline_dfci.map_regions', (['chip_dataFile', '[edge_gff_path]', 'mappedFolder', 'signalFolder', 'all_chip_list', '(True)', 'signal_path'], {'extendReadsTo': '(100)'}), '(chip_dataFile, [edge_gff_path], mappedFolder,\n signalFolder, all_chip_list, True, signal_path, extendReadsTo=100)\n', (4278, 4395), False, 'import pipeline_dfci\n'), ((5973, 6017), 'utils.parseTable', 'utils.parseTable', (['signal_filtered_path', '"""\t"""'], {}), "(signal_filtered_path, '\\t')\n", (5989, 6017), False, 'import utils\n'), ((6666, 6735), 'utils.unParseTable', 'utils.unParseTable', (['signal_table_filtered', 'signal_filtered_path', '"""\t"""'], {}), "(signal_table_filtered, signal_filtered_path, '\\t')\n", (6684, 6735), False, 'import utils\n'), ((8996, 9032), 'utils.checkOutput', 'utils.checkOutput', (['signal_path', '(0)', '(0)'], {}), '(signal_path, 0, 0)\n', (9013, 9032), False, 'import utils\n'), ((9069, 9222), 'pipeline_dfci.map_regions', 'pipeline_dfci.map_regions', (['chip_dataFile', '[enhancer_tf_gff_path]', 'mappedFolder', 'signalFolder', 'all_chip_list', '(True)', 'signal_path'], {'extendReadsTo': '(100)'}), '(chip_dataFile, [enhancer_tf_gff_path],\n mappedFolder, signalFolder, all_chip_list, True, signal_path,\n extendReadsTo=100)\n', (9094, 9222), False, 'import pipeline_dfci\n'), ((7249, 7272), 'numpy.mean', 'numpy.mean', (['edge_vector'], {}), '(edge_vector)\n', (7259, 7272), False, 'import numpy\n'), ((7305, 7330), 'numpy.median', 'numpy.median', (['edge_vector'], {}), '(edge_vector)\n', (7317, 7330), False, 'import numpy\n'), ((7360, 7382), 'numpy.std', 'numpy.std', (['edge_vector'], {}), '(edge_vector)\n', (7369, 7382), False, 'import numpy\n'), ((7412, 7434), 'scipy.stats.sem', 'stats.sem', (['edge_vector'], {}), '(edge_vector)\n', (7421, 7434), False, 'from scipy import stats\n'), ((11076, 11117), 
'numpy.log2', 'numpy.log2', (['(group1_signal / group2_signal)'], {}), '(group1_signal / group2_signal)\n', (11086, 11117), False, 'import numpy\n'), ((6492, 6533), 'numpy.log2', 'numpy.log2', (['(group1_signal / group2_signal)'], {}), '(group1_signal / group2_signal)\n', (6502, 6533), False, 'import numpy\n')] |
import multiprocessing as mp
import logging
import traceback
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim, xfail_with_cuda_python
def child_test():
from numba import cuda, int32, void
from numba.core import config
import io
import numpy as np
import threading
# Enable PTDS before we make any CUDA driver calls. Enabling it first
# ensures that PTDS APIs are used because the CUDA driver looks up API
# functions on first use and memoizes them.
config.CUDA_PER_THREAD_DEFAULT_STREAM = 1
# Set up log capture for the Driver API so we can see what API calls were
# used.
logbuf = io.StringIO()
handler = logging.StreamHandler(logbuf)
cudadrv_logger = logging.getLogger('numba.cuda.cudadrv.driver')
cudadrv_logger.addHandler(handler)
cudadrv_logger.setLevel(logging.DEBUG)
# Set up data for our test, and copy over to the device
N = 2 ** 16
N_THREADS = 10
N_ADDITIONS = 4096
# Seed the RNG for repeatability
np.random.seed(1)
x = np.random.randint(low=0, high=1000, size=N, dtype=np.int32)
r = np.zeros_like(x)
# One input and output array for each thread
xs = [cuda.to_device(x) for _ in range(N_THREADS)]
rs = [cuda.to_device(r) for _ in range(N_THREADS)]
# Compute the grid size and get the [per-thread] default stream
n_threads = 256
n_blocks = N // n_threads
stream = cuda.default_stream()
# A simple multiplication-by-addition kernel. What it does exactly is not
# too important; only that we have a kernel that does something.
@cuda.jit(void(int32[::1], int32[::1]))
def f(r, x):
i = cuda.grid(1)
if i > len(r):
return
# Accumulate x into r
for j in range(N_ADDITIONS):
r[i] += x[i]
# This function will be used to launch the kernel from each thread on its
# own unique data.
def kernel_thread(n):
f[n_blocks, n_threads, stream](rs[n], xs[n])
# Create threads
threads = [threading.Thread(target=kernel_thread, args=(i,))
for i in range(N_THREADS)]
# Start all threads
for thread in threads:
thread.start()
# Wait for all threads to finish, to ensure that we don't synchronize with
# the device until all kernels are scheduled.
for thread in threads:
thread.join()
# Synchronize with the device
cuda.synchronize()
# Check output is as expected
expected = x * N_ADDITIONS
for i in range(N_THREADS):
np.testing.assert_equal(rs[i].copy_to_host(), expected)
# Return the driver log output to the calling process for checking
handler.flush()
return logbuf.getvalue()
def child_test_wrapper(result_queue):
try:
output = child_test()
success = True
# Catch anything raised so it can be propagated
except: # noqa: E722
output = traceback.format_exc()
success = False
result_queue.put((success, output))
@skip_on_cudasim('Streams not supported on the simulator')
class TestPTDS(CUDATestCase):
@xfail_with_cuda_python
def test_ptds(self):
# Run a test with PTDS enabled in a child process
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
proc = ctx.Process(target=child_test_wrapper, args=(result_queue,))
proc.start()
proc.join()
success, output = result_queue.get()
# Ensure the child process ran to completion before checking its output
if not success:
self.fail(output)
# Functions with a per-thread default stream variant that we expect to
# see in the output
ptds_functions = ('cuMemcpyHtoD_v2_ptds', 'cuLaunchKernel_ptsz',
'cuMemcpyDtoH_v2_ptds')
for fn in ptds_functions:
with self.subTest(fn=fn, expected=True):
self.assertIn(fn, output)
# Non-PTDS versions of the functions that we should not see in the
# output:
legacy_functions = ('cuMemcpyHtoD_v2', 'cuLaunchKernel',
'cuMemcpyDtoH_v2')
for fn in legacy_functions:
with self.subTest(fn=fn, expected=False):
# Ensure we only spot these function names appearing without a
# _ptds or _ptsz suffix by checking including the end of the
# line in the log
fn_at_end = f'{fn}\n'
self.assertNotIn(fn_at_end, output)
if __name__ == '__main__':
unittest.main()
| [
"logging.getLogger",
"traceback.format_exc",
"logging.StreamHandler",
"numba.cuda.default_stream",
"numba.cuda.grid",
"multiprocessing.get_context",
"numpy.random.randint",
"numba.cuda.testing.unittest.main",
"numba.cuda.synchronize",
"numpy.random.seed",
"numba.cuda.to_device",
"numba.void",
... | [((3042, 3099), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudasim', (['"""Streams not supported on the simulator"""'], {}), "('Streams not supported on the simulator')\n", (3057, 3099), False, 'from numba.cuda.testing import skip_on_cudasim, xfail_with_cuda_python\n'), ((687, 700), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (698, 700), False, 'import io\n'), ((715, 744), 'logging.StreamHandler', 'logging.StreamHandler', (['logbuf'], {}), '(logbuf)\n', (736, 744), False, 'import logging\n'), ((766, 812), 'logging.getLogger', 'logging.getLogger', (['"""numba.cuda.cudadrv.driver"""'], {}), "('numba.cuda.cudadrv.driver')\n", (783, 812), False, 'import logging\n'), ((1056, 1073), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1070, 1073), True, 'import numpy as np\n'), ((1082, 1141), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000)', 'size': 'N', 'dtype': 'np.int32'}), '(low=0, high=1000, size=N, dtype=np.int32)\n', (1099, 1141), True, 'import numpy as np\n'), ((1150, 1166), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1163, 1166), True, 'import numpy as np\n'), ((1459, 1480), 'numba.cuda.default_stream', 'cuda.default_stream', ([], {}), '()\n', (1478, 1480), False, 'from numba import cuda, int32, void\n'), ((2454, 2472), 'numba.cuda.synchronize', 'cuda.synchronize', ([], {}), '()\n', (2470, 2472), False, 'from numba import cuda, int32, void\n'), ((4582, 4597), 'numba.cuda.testing.unittest.main', 'unittest.main', ([], {}), '()\n', (4595, 4597), False, 'from numba.cuda.testing import unittest, CUDATestCase\n'), ((1227, 1244), 'numba.cuda.to_device', 'cuda.to_device', (['x'], {}), '(x)\n', (1241, 1244), False, 'from numba import cuda, int32, void\n'), ((1282, 1299), 'numba.cuda.to_device', 'cuda.to_device', (['r'], {}), '(r)\n', (1296, 1299), False, 'from numba import cuda, int32, void\n'), ((1702, 1714), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (1711, 1714), False, 'from numba 
import cuda, int32, void\n'), ((1643, 1671), 'numba.void', 'void', (['int32[::1]', 'int32[::1]'], {}), '(int32[::1], int32[::1])\n', (1647, 1671), False, 'from numba import cuda, int32, void\n'), ((2069, 2118), 'threading.Thread', 'threading.Thread', ([], {'target': 'kernel_thread', 'args': '(i,)'}), '(target=kernel_thread, args=(i,))\n', (2085, 2118), False, 'import threading\n'), ((3255, 3278), 'multiprocessing.get_context', 'mp.get_context', (['"""spawn"""'], {}), "('spawn')\n", (3269, 3278), True, 'import multiprocessing as mp\n'), ((2951, 2973), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2971, 2973), False, 'import traceback\n')] |
# import the necessary packages(we used only numpy and matplotlib :D )
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import time
def rectangle(img,x,y,w,h,t):
img[y-t:y,x-t:x+w+t,:]=255
img[y-t:y+h+t,x+w:x+w+t,:]=255
img[y+h:y+h+t,x-t:x+w+t,:]=255
img[y-t:y+h+t,x-t:x,:]=255
return img
def GetBilinearPixel(imArr, posX, posY):
    """Sample a single-channel image at the fractional position (posX, posY)
    using bilinear interpolation, and return the value rounded to int.

    The original accumulated the single result in a one-element list
    (`out.append(...); return out[0]`); that indirection is removed.
    """
    # Integer and fractional parts of the sampling position.
    x0 = int(posX)
    y0 = int(posY)
    fx = posX - x0
    fy = posY - y0
    # Clamp the +1 neighbours so sampling at the last row/column stays in bounds.
    x1 = min(x0 + 1, imArr.shape[1] - 1)
    y1 = min(y0 + 1, imArr.shape[0] - 1)
    # Four surrounding pixels.
    bl = imArr[y0, x0]
    br = imArr[y0, x1]
    tl = imArr[y1, x0]
    tr = imArr[y1, x1]
    # Interpolate horizontally, then vertically.
    b = fx * br + (1. - fx) * bl
    t = fx * tr + (1. - fx) * tl
    pxf = fy * t + (1. - fy) * b
    return int(pxf + 0.5)  # round half up, matching the original
def re_size(im, x):
    """Rescale a grayscale image by factor x via bilinear sampling of the original."""
    out_shape = tuple(map(int, [im.shape[0] * x, im.shape[1] * x]))
    out = np.zeros(out_shape)
    # Map each output pixel back to its source coordinate in the input.
    row_scale = float(im.shape[0]) / float(out.shape[0])
    col_scale = float(im.shape[1]) / float(out.shape[1])
    for row in range(out.shape[0]):
        for col in range(out.shape[1]):
            out[row, col] = GetBilinearPixel(im, col * col_scale, row * row_scale)
    return out
def rgbtogray(img):
    """Collapse an RGB image to a 2-D grayscale image using the standard
    luma weights (0.299 R + 0.587 G + 0.114 B)."""
    red = img[:, :, 0]
    green = img[:, :, 1]
    blue = img[:, :, 2]
    return red * 0.299 + green * 0.587 + blue * 0.114
#edge-detection
#########################################
#########################################
###########
def convolution(image, kernel):
    """2-D convolution (cross-correlation: the kernel is not flipped) with
    zero padding so the output has the same shape as the input."""
    img_h, img_w = image.shape
    k_h, k_w = kernel.shape
    # Pad by half the kernel size on each side.
    pad_h = k_h // 2
    pad_w = k_w // 2
    padded = np.zeros((img_h + 2 * pad_h, img_w + 2 * pad_w))
    padded[pad_h:pad_h + img_h, pad_w:pad_w + img_w] = image
    result = np.zeros(image.shape)
    # Slide the kernel over every output position.
    for r in range(img_h):
        for c in range(img_w):
            window = padded[r:r + k_h, c:c + k_w]
            result[r, c] = np.sum(kernel * window)
    return result
###########
def sobel_edge_detection(image, filter, convert_to_degree=False):
    """Apply the Sobel operator with `filter` for the x direction and its
    flipped transpose for the y direction.

    Returns (gradient_magnitude, gradient_direction); magnitude is scaled to
    [0, 255], direction is radians in (-pi, pi], or degrees shifted to
    [0, 360] when convert_to_degree is True.
    """
    # Horizontal and vertical responses.
    new_image_x = convolution(image, filter)
    new_image_y = convolution(image, np.flip(filter.T, axis=0))
    gradient_magnitude = np.sqrt(np.square(new_image_x) + np.square(new_image_y))
    # Normalise to [0, 255]. Guard the flat-image case: the original divided
    # by max() unconditionally, producing NaNs/warnings when the image has no
    # gradient at all.
    peak = gradient_magnitude.max()
    if peak > 0:
        gradient_magnitude *= 255.0 / peak
    gradient_direction = np.arctan2(new_image_y, new_image_x)
    if convert_to_degree:
        gradient_direction = np.rad2deg(gradient_direction)
        gradient_direction += 180
    return gradient_magnitude, gradient_direction
#########
def Gauss(img):
    """Smooth a grayscale image with a normalised 5x5 Gaussian kernel.

    The original pre-allocated a zeros buffer (and unpacked height/width)
    that were never used -- `convolution` allocates its own output -- so
    that dead code is removed.
    """
    gauss = (1.0 / 273) * np.array(
        [[1, 4, 7, 4, 1],
         [4, 16, 26, 16, 4],
         [7, 26, 41, 26, 7],
         [4, 16, 26, 16, 4],
         [1, 4, 7, 4, 1]])
    return convolution(img, gauss)
###########
def non_max_suppression(gradient_magnitude, gradient_direction):
    """Thin the edge map: keep a pixel only if its magnitude is a local
    maximum along the gradient direction. Border pixels stay 0."""
    rows, cols = gradient_magnitude.shape
    suppressed = np.zeros(gradient_magnitude.shape)
    PI = 180  # directions are degrees already shifted into [0, 360]
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            angle = gradient_direction[r, c]
            # Select the two neighbours lying along the gradient direction,
            # binning the angle into one of four orientations.
            if (0 <= angle < PI / 8) or (15 * PI / 8 <= angle <= 2 * PI):
                nb1 = gradient_magnitude[r, c - 1]
                nb2 = gradient_magnitude[r, c + 1]
            elif (PI / 8 <= angle < 3 * PI / 8) or (9 * PI / 8 <= angle < 11 * PI / 8):
                nb1 = gradient_magnitude[r + 1, c - 1]
                nb2 = gradient_magnitude[r - 1, c + 1]
            elif (3 * PI / 8 <= angle < 5 * PI / 8) or (11 * PI / 8 <= angle < 13 * PI / 8):
                nb1 = gradient_magnitude[r - 1, c]
                nb2 = gradient_magnitude[r + 1, c]
            else:
                nb1 = gradient_magnitude[r - 1, c - 1]
                nb2 = gradient_magnitude[r + 1, c + 1]
            here = gradient_magnitude[r, c]
            if here >= nb1 and here >= nb2:
                suppressed[r, c] = here
    return suppressed
###########
def threshold(image, low, high, weak):
    """Double threshold: strong pixels (>= high) become 255, candidate pixels
    in [low, high] get the marker value `weak`, everything else stays 0.
    (Pixels exactly equal to `high` end up weak, matching the original
    assignment order.)"""
    output = np.zeros(image.shape)
    output[image >= high] = 255
    output[(image >= low) & (image <= high)] = weak
    return output
###########
def hysteresis(img, weak, strong):
    """Edge tracking by hysteresis, in place: promote a weak pixel to strong
    when any of its 8 neighbours is strong, otherwise drop it to 0.
    Border pixels are never touched. Returns the mutated array.

    NOTE: the scan is order-dependent (a pixel promoted earlier in the scan
    can promote later neighbours), which matches the original behaviour.
    The original wrapped the neighbour reads in try/except IndexError, but
    i and j only range over interior pixels, so no index can ever be out of
    bounds -- that dead handler is removed.
    """
    M, N = img.shape
    for i in range(1, M - 1):
        for j in range(1, N - 1):
            if img[i, j] != weak:
                continue
            has_strong_neighbour = (
                img[i + 1, j - 1] == strong or img[i + 1, j] == strong
                or img[i + 1, j + 1] == strong or img[i, j - 1] == strong
                or img[i, j + 1] == strong or img[i - 1, j - 1] == strong
                or img[i - 1, j] == strong or img[i - 1, j + 1] == strong
            )
            img[i, j] = strong if has_strong_neighbour else 0
    return img
#######
def edge_detection(img):
    """Canny-style pipeline: Gaussian blur -> Sobel gradients ->
    non-maximum suppression -> double threshold -> hysteresis."""
    blurred = Gauss(img)
    sobel_kernel = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    magnitude, direction = sobel_edge_detection(blurred, sobel_kernel, convert_to_degree=True)
    thinned = non_max_suppression(magnitude, direction)
    # Weak pixels are tagged 25, strong pixels 255, before edge tracking.
    thresholded = threshold(thinned, 5, 20, 25)
    return hysteresis(thresholded, 25, 255)
#####
def corrcoef2(a, b):
    """Normalised cross-correlation of two equally-shaped arrays."""
    numerator = np.sum(a * b)
    denominator = np.sqrt(np.sum(a * a) * np.sum(b * b))
    return numerator / denominator
def matchtemplate(img, kernel):
    """Slide `kernel` over `img` and return ((row, col), score, patch) of the
    window with the best normalised correlation; (row, col) is the window's
    top-left corner."""
    img_h, img_w = img.shape
    k_h, k_w = kernel.shape
    half_h, half_w = k_h // 2, k_w // 2
    best_pos = (0, 0)
    best_score = 0
    best_patch = np.zeros(kernel.shape)
    # Only positions where the whole window fits are considered.
    for i in range(half_h, img_h - half_h):
        for j in range(half_w, img_w - half_w):
            window = img[i - half_h:i - half_h + k_h, j - half_w:j - half_w + k_w]
            score = corrcoef2(window, kernel)
            # >= keeps the last best on ties, matching the original.
            if score >= best_score:
                best_pos = (i - half_h, j - half_w)
                best_score = score
                best_patch = window
    return best_pos, best_score, best_patch
#####
#####
if __name__=='__main__':
    # Template matching by edge maps: build an edge image of the logo template,
    # then scan each photo at several scales for the best-correlating window.
    st=time.time()
    template1=mpimg.imread('logo.jpg')
    template = np.array(template1)
    template = rgbtogray(template)
    template = edge_detection(template)
    print('completed the edge detection of template!')
    for j in range(1,4):
        image = np.array(mpimg.imread('image'+str(j)+'.jpg'))
        # Keep the original colour image so the match box can be drawn on it later.
        if j==1 :
            show1=image
        elif j==2 :
            show2=image
        else :
            show3=image
        gray1 = rgbtogray(image)
        gray = edge_detection(gray1)
        print('completed the edge detection of image'+str(j))
        (tH,tW)=template.shape
        found=(0,(0,0),0)
        print('started template-matching for image'+str(j))
        # Multi-scale search, largest scale first; r maps coordinates in the
        # resized image back to the original image.
        for scale in np.linspace(0.2,1.0,5)[::-1]:
            resized=re_size(gray,scale)
            r = gray.shape[1] / float(resized.shape[1])
            # Stop once the image is smaller than the template.
            if resized.shape[0] < tH or resized.shape[1] < tW:
                break
            (maxLoc,maxVal,patch)=matchtemplate(resized,template)
            # Keep the best score seen across scales.
            if found==(0,(0,0),0) or maxVal > found[0]:
                found,patch2,final_scale = (maxVal, maxLoc, r),patch,scale
        print('completed template-matching for image'+str(j))
        (_, maxLoc, r) = found
        # Draw the detected bounding box, scaled back to original coordinates.
        if j==1:
            show1=rectangle(show1,int(maxLoc[1]*r),int(maxLoc[0]*r),int(tW*r),int(tH*r),5)
        elif j==2:
            show2=rectangle(show2,int(maxLoc[1]*r),int(maxLoc[0]*r),int(tW*r),int(tH*r),5)
        else:
            show3=rectangle(show3,int(maxLoc[1]*r),int(maxLoc[0]*r),int(tW*r),int(tH*r),5)
    #display
    plt.subplot(221),plt.imshow(template1)
    plt.title('template')
    plt.subplot(222),plt.imshow(show1)
    plt.title('image1')
    plt.subplot(223),plt.imshow(show2)
    plt.title('image2')
    plt.subplot(224),plt.imshow(show3)
    plt.title('image3')
    sp=time.time()
    print('Time-taken : '+str(sp-st)+' sec')
    print('Thankyou!')
    plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.flip",
"numpy.where",
"matplotlib.image.imread",
"numpy.square",
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.arctan2",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.rad2deg",
"time.time",
"matplotlib.pyplot.show"... | [((1315, 1338), 'numpy.zeros', 'np.zeros', (['enlargedShape'], {}), '(enlargedShape)\n', (1323, 1338), True, 'import numpy as np\n'), ((2419, 2440), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (2427, 2440), True, 'import numpy as np\n'), ((2623, 2688), 'numpy.zeros', 'np.zeros', (['(image_row + 2 * pad_height, image_col + 2 * pad_width)'], {}), '((image_row + 2 * pad_height, image_col + 2 * pad_width))\n', (2631, 2688), True, 'import numpy as np\n'), ((3525, 3561), 'numpy.arctan2', 'np.arctan2', (['new_image_y', 'new_image_x'], {}), '(new_image_y, new_image_x)\n', (3535, 3561), True, 'import numpy as np\n'), ((3809, 3828), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (3817, 3828), True, 'import numpy as np\n'), ((4263, 4297), 'numpy.zeros', 'np.zeros', (['gradient_magnitude.shape'], {}), '(gradient_magnitude.shape)\n', (4271, 4297), True, 'import numpy as np\n'), ((5556, 5577), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (5564, 5577), True, 'import numpy as np\n'), ((5608, 5631), 'numpy.where', 'np.where', (['(image >= high)'], {}), '(image >= high)\n', (5616, 5631), True, 'import numpy as np\n'), ((5658, 5700), 'numpy.where', 'np.where', (['((image <= high) & (image >= low))'], {}), '((image <= high) & (image >= low))\n', (5666, 5700), True, 'import numpy as np\n'), ((6571, 6617), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (6579, 6617), True, 'import numpy as np\n'), ((7186, 7205), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (7194, 7205), True, 'import numpy as np\n'), ((7265, 7287), 'numpy.zeros', 'np.zeros', (['kernel.shape'], {}), '(kernel.shape)\n', (7273, 7287), True, 'import numpy as np\n'), ((7691, 7702), 'time.time', 'time.time', ([], {}), '()\n', (7700, 7702), False, 'import time\n'), ((7718, 7742), 'matplotlib.image.imread', 'mpimg.imread', (['"""logo.jpg"""'], 
{}), "('logo.jpg')\n", (7730, 7742), True, 'import matplotlib.image as mpimg\n'), ((7759, 7778), 'numpy.array', 'np.array', (['template1'], {}), '(template1)\n', (7767, 7778), True, 'import numpy as np\n'), ((9509, 9530), 'matplotlib.pyplot.title', 'plt.title', (['"""template"""'], {}), "('template')\n", (9518, 9530), True, 'import matplotlib.pyplot as plt\n'), ((9576, 9595), 'matplotlib.pyplot.title', 'plt.title', (['"""image1"""'], {}), "('image1')\n", (9585, 9595), True, 'import matplotlib.pyplot as plt\n'), ((9641, 9660), 'matplotlib.pyplot.title', 'plt.title', (['"""image2"""'], {}), "('image2')\n", (9650, 9660), True, 'import matplotlib.pyplot as plt\n'), ((9706, 9725), 'matplotlib.pyplot.title', 'plt.title', (['"""image3"""'], {}), "('image3')\n", (9715, 9725), True, 'import matplotlib.pyplot as plt\n'), ((9734, 9745), 'time.time', 'time.time', ([], {}), '()\n', (9743, 9745), False, 'import time\n'), ((9821, 9831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9829, 9831), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3322), 'numpy.flip', 'np.flip', (['filter.T'], {'axis': '(0)'}), '(filter.T, axis=0)\n', (3304, 3322), True, 'import numpy as np\n'), ((3621, 3651), 'numpy.rad2deg', 'np.rad2deg', (['gradient_direction'], {}), '(gradient_direction)\n', (3631, 3651), True, 'import numpy as np\n'), ((3901, 4010), 'numpy.array', 'np.array', (['[[1, 4, 7, 4, 1], [4, 16, 26, 16, 4], [7, 26, 41, 26, 7], [4, 16, 26, 16, 4\n ], [1, 4, 7, 4, 1]]'], {}), '([[1, 4, 7, 4, 1], [4, 16, 26, 16, 4], [7, 26, 41, 26, 7], [4, 16, \n 26, 16, 4], [1, 4, 7, 4, 1]])\n', (3909, 4010), True, 'import numpy as np\n'), ((6972, 6985), 'numpy.sum', 'np.sum', (['(a * b)'], {}), '(a * b)\n', (6978, 6985), True, 'import numpy as np\n'), ((9465, 9481), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (9476, 9481), True, 'import matplotlib.pyplot as plt\n'), ((9482, 9503), 'matplotlib.pyplot.imshow', 'plt.imshow', (['template1'], {}), '(template1)\n', 
(9492, 9503), True, 'import matplotlib.pyplot as plt\n'), ((9536, 9552), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (9547, 9552), True, 'import matplotlib.pyplot as plt\n'), ((9553, 9570), 'matplotlib.pyplot.imshow', 'plt.imshow', (['show1'], {}), '(show1)\n', (9563, 9570), True, 'import matplotlib.pyplot as plt\n'), ((9601, 9617), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (9612, 9617), True, 'import matplotlib.pyplot as plt\n'), ((9618, 9635), 'matplotlib.pyplot.imshow', 'plt.imshow', (['show2'], {}), '(show2)\n', (9628, 9635), True, 'import matplotlib.pyplot as plt\n'), ((9666, 9682), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (9677, 9682), True, 'import matplotlib.pyplot as plt\n'), ((9683, 9700), 'matplotlib.pyplot.imshow', 'plt.imshow', (['show3'], {}), '(show3)\n', (9693, 9700), True, 'import matplotlib.pyplot as plt\n'), ((2956, 3029), 'numpy.sum', 'np.sum', (['(kernel * padded_image[row:row + kernel_row, col:col + kernel_col])'], {}), '(kernel * padded_image[row:row + kernel_row, col:col + kernel_col])\n', (2962, 3029), True, 'import numpy as np\n'), ((3386, 3408), 'numpy.square', 'np.square', (['new_image_x'], {}), '(new_image_x)\n', (3395, 3408), True, 'import numpy as np\n'), ((3411, 3433), 'numpy.square', 'np.square', (['new_image_y'], {}), '(new_image_y)\n', (3420, 3433), True, 'import numpy as np\n'), ((8477, 8501), 'numpy.linspace', 'np.linspace', (['(0.2)', '(1.0)', '(5)'], {}), '(0.2, 1.0, 5)\n', (8488, 8501), True, 'import numpy as np\n'), ((6994, 7007), 'numpy.sum', 'np.sum', (['(a * a)'], {}), '(a * a)\n', (7000, 7007), True, 'import numpy as np\n'), ((7008, 7021), 'numpy.sum', 'np.sum', (['(b * b)'], {}), '(b * b)\n', (7014, 7021), True, 'import numpy as np\n')] |
"""
Filtering and dataset mapping methods based on training dynamics.
By default, this module reads training dynamics from a given trained model and
computes the metrics---confidence, variability, correctness,
as well as baseline metrics of forgetfulness and threshold closeness
for each instance in the training data.
If specified, data maps can be plotted with respect to confidence and variability.
Moreover, datasets can be filtered with respect any of the other metrics.
"""
import argparse
import json
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import torch
import tqdm
import imageio
from collections import defaultdict
from typing import List
from cartography.data_utils import read_data, read_jsonl, copy_dev_test
from cartography.selection.selection_utils import read_dynamics
# TODO(SS): Named tuple for tasks and filtering methods.
# Module-wide logging: timestamped records at INFO level; `logger` is the
# shared module logger used throughout this file.
logging.basicConfig(
  format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
def compute_forgetfulness(correctness_trend: List[float]) -> int:
  """
  Count forgetting events in an epoch-wise correctness trend: transitions from
  a correctly-predicted epoch to an incorrectly-predicted one. An example that
  is never predicted correctly gets the sentinel 1000 ("forgotten forever").
  Based on: https://arxiv.org/abs/1812.05159
  """
  if not any(correctness_trend):  # never learnt at all
    return 1000
  # The learnt/unlearnt state in the original scan always equals the previous
  # epoch's correctness, so a forgetting event is exactly an adjacent
  # (correct -> incorrect) pair.
  return sum(
      1
      for prev, curr in zip(correctness_trend, correctness_trend[1:])
      if prev and not curr
  )
def compute_correctness(trend: List[float]) -> float:
  """
  Number of training epochs in which the example was predicted correctly.
  """
  total = 0
  for is_correct in trend:
    total += is_correct
  return total
def compute_train_dy_metrics_per_epoch(training_dynamics, heuristics, original_id, mode="train"):
  """
  Given the training dynamics (logits for each training instance across epochs), compute
  per-epoch data-map metrics: for every example, the running confidence (mean gold-class
  probability), variability (std of gold-class probability) and correctness up to each epoch,
  plus the per-epoch predicted label. Heuristic tags (lexical/constituent/subsequence, and
  for non-train modes an OOD flag) and the original dataset id are carried along.

  Side effect: writes the per-example frame to "ALL_SAMPLES_<mode>.csv" in the CWD.

  Returns:
  - DataFrame with these metrics (one row per guid; metric columns hold per-epoch lists).
  - DataFrame with typical training evaluation metrics (per-epoch loss / accuracy).
  """
  confidence_ = {}
  variability_ = {}
  threshold_closeness_ = {}
  correctness_ = {}
  forgetfulness_ = {}
  lexical = {}
  constituent = {}
  subsequence= {}
  original_ids = {}
  ood={}
  predicted_labels = {}
  golds_labels = {}

  # Functions to be applied to the data.
  variability_func = lambda conf: np.std(conf)
  # if include_ci:  # Based on prior work on active bias (https://arxiv.org/abs/1704.07433)
  #   variability_func = lambda conf: np.sqrt(np.var(conf) + np.var(conf) * np.var(conf) / (len(conf)-1))
  threshold_closeness_func = lambda conf: conf * (1 - conf)

  loss = torch.nn.CrossEntropyLoss()

  num_tot_epochs = len(list(training_dynamics.values())[0]["logits"])
  # if burn_out < num_tot_epochs:
  #   logger.info(f"Computing training dynamics. Burning out at {burn_out} of {num_tot_epochs}. ")
  # else:
  logger.info(f"Computing training dynamics across {num_tot_epochs} epochs")
  logger.info("Metrics computed: confidence, variability, correctness, forgetfulness, threshold_closeness")

  # Per-epoch accumulators used for the loss/accuracy summary frame.
  logits = {i: [] for i in range(num_tot_epochs)}
  targets = {i: [] for i in range(num_tot_epochs)}
  training_accuracy = defaultdict(float)

  for guid in tqdm.tqdm(training_dynamics):
    correctness_trend = []
    true_probs_trend = []
    correctness_ep = []
    confidence_ep = []
    variability_ep = []
    prediction_ep = []

    record = training_dynamics[guid]
    for i, epoch_logits in enumerate(record["logits"]):
      if i >= len(logits.keys()):
        break
      probs = torch.nn.functional.softmax(torch.Tensor(epoch_logits), dim=-1)
      true_class_prob = float(probs[record["gold"]])
      true_probs_trend.append(true_class_prob)

      prediction = np.argmax(epoch_logits)
      is_correct = (prediction == record["gold"]).item()
      correctness_trend.append(is_correct)

      training_accuracy[i] += is_correct
      logits[i].append(epoch_logits)
      targets[i].append(record["gold"])
      # Running (up to and including epoch i) metrics, one entry per epoch.
      correctness_ep.append(compute_correctness(correctness_trend))
      confidence_ep.append(np.mean(true_probs_trend))
      variability_ep.append(variability_func(true_probs_trend))
      prediction_ep.append(prediction.item())

    correctness_[guid] = correctness_ep
    confidence_[guid] = confidence_ep
    variability_[guid] = variability_ep
    # if burn_out < num_tot_epochs:
    #   correctness_trend = correctness_trend[:burn_out]
    #   true_probs_trend = true_probs_trend[:burn_out]
    # correctness_[guid] = compute_correctness(correctness_trend)
    # confidence_[guid] = np.mean(true_probs_trend)
    # variability_[guid] = variability_func(true_probs_trend)

    # forgetfulness_[guid] = compute_forgetfulness(correctness_trend)
    # threshold_closeness_[guid] = threshold_closeness_func(confidence_[guid])
    lexical[guid] = heuristics[guid]["lexical"]
    constituent[guid] = heuristics[guid]["constituent"]
    subsequence[guid] = heuristics[guid]["subsequence"]
    ood[guid] = heuristics[guid]["ood"]
    original_ids[guid] = original_id[guid]
    predicted_labels[guid] = prediction_ep

  # Should not affect ranking, so ignoring.
  # NOTE: epsilon_var is currently unused in the live code path.
  epsilon_var = np.mean(list(variability_.values()))

  column_names = ['guid',
                  'index',
                  # 'threshold_closeness',
                  'confidence',
                  'variability',
                  'correctness',
                  # 'forgetfulness',
                  'pred_label',
                  'lexical', 'constituent', 'subsequence', 'original_id']
  # Non-train splits additionally carry the OOD flag (inserted before original_id).
  if mode != "train":
      column_names.insert(-1, "ood")
      df = pd.DataFrame([[guid,
                        i,
                        # threshold_closeness_[guid],
                        confidence_[guid],
                        variability_[guid],
                        correctness_[guid],
                        predicted_labels[guid],
                        # forgetfulness_[guid],
                        lexical[guid],
                        constituent[guid],
                        subsequence[guid],
                        ood[guid],
                        original_ids[guid]
                        ] for i, guid in enumerate(correctness_)], columns=column_names)
      df_train = pd.DataFrame([[i,
                                loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(
                                  training_dynamics),
                                training_accuracy[i] / len(training_dynamics)
                                ] for i in range(num_tot_epochs)],
                              columns=['epoch', 'loss', 'train_acc'])
  else:
      df = pd.DataFrame([[guid,
                          i,
                          # threshold_closeness_[guid],
                          confidence_[guid],
                          variability_[guid],
                          correctness_[guid],
                          predicted_labels[guid],
                          # forgetfulness_[guid],
                          lexical[guid],
                          constituent[guid],
                          subsequence[guid],
                          original_ids[guid]
                          ] for i, guid in enumerate(correctness_)], columns=column_names)
      df_train = pd.DataFrame([[i,loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(training_dynamics),training_accuracy[i] / len(training_dynamics)] for i in range(num_tot_epochs)], columns=['epoch', 'loss', 'train_acc'])
  df.to_csv(f"ALL_SAMPLES_{mode}.csv")
  return df, df_train
def consider_ascending_order(filtering_metric: str) -> bool:
  """
  Determine if the metric values' sorting order to get the most `valuable`
  examples for training: True means ascending order puts them first.
  """
  ascending_by_metric = {
      "variability": False,
      "confidence": True,
      "threshold_closeness": False,
      "forgetfulness": False,
      "correctness": True,
  }
  if filtering_metric not in ascending_by_metric:
    raise NotImplementedError(f"Filtering based on {filtering_metric} not implemented!")
  return ascending_by_metric[filtering_metric]
def write_filtered_data(args, train_dy_metrics):
  """
  Filter data based on the given metric, and write it in TSV format to train GLUE-style classifier.

  For each fraction in a fixed list, selects that share of training examples
  (sorted best-first by `args.metric`) and writes a train.tsv plus copied
  dev/test splits under `args.filtering_output_dir`.
  """
  # First save the args for filtering, to keep track of which model was used for filtering.
  argparse_dict = vars(args)
  with open(os.path.join(args.filtering_output_dir, f"filtering_configs.json"), "w") as outfile:
    outfile.write(json.dumps(argparse_dict, indent=4, sort_keys=True) + "\n")

  # Determine whether to sort data in ascending order or not, based on the metric.
  is_ascending = consider_ascending_order(args.metric)
  if args.worst:
    # `worst` inverts the selection: keep the least valuable examples.
    is_ascending = not is_ascending

  # Sort by selection.
  sorted_scores = train_dy_metrics.sort_values(by=[args.metric],
                                               ascending=is_ascending)

  original_train_file = os.path.join(os.path.join(args.data_dir, args.task_name), f"train.tsv")
  train_numeric, header = read_data(original_train_file, task_name=args.task_name, guid_as_int=True)

  for fraction in [0.01, 0.05, 0.10, 0.1667, 0.25, 0.3319, 0.50, 0.75]:
    outdir = os.path.join(args.filtering_output_dir,
                          f"cartography_{args.metric}_{fraction:.2f}/{args.task_name}")
    if not os.path.exists(outdir):
      os.makedirs(outdir)

    # Dev and test need not be subsampled.
    copy_dev_test(args.task_name,
                  from_dir=os.path.join(args.data_dir, args.task_name),
                  to_dir=outdir)

    num_samples = int(fraction * len(train_numeric))
    with open(os.path.join(outdir, f"train.tsv"), "w") as outfile:
      outfile.write(header + "\n")

      selected = sorted_scores.head(n=num_samples+1)
      if args.both_ends:
        # Take 70% from the "hard" end and the remainder from the "easy" end.
        hardest = sorted_scores.head(n=int(num_samples * 0.7))
        easiest = sorted_scores.tail(n=num_samples - hardest.shape[0])
        selected = pd.concat([hardest, easiest])
        fm = args.metric
        logger.info(f"Selecting both ends: {fm} = "
                    f"({hardest.head(1)[fm].values[0]:3f}: {hardest.tail(1)[fm].values[0]:3f}) "
                    f"& ({easiest.head(1)[fm].values[0]:3f}: {easiest.tail(1)[fm].values[0]:3f})")

      selection_iterator = tqdm.tqdm(range(len(selected)))
      for idx in selection_iterator:
        selection_iterator.set_description(
          f"{args.metric} = {selected.iloc[idx][args.metric]:.4f}")

        selected_id = selected.iloc[idx]["guid"]
        # Task-specific guid normalisation before looking up the raw record.
        if args.task_name in ["SNLI", "MNLI"]:
          selected_id = int(selected_id)
        elif args.task_name == "WINOGRANDE":
          selected_id = str(int(selected_id))
        record = train_numeric[selected_id]
        outfile.write(record + "\n")

    logger.info(f"Wrote {num_samples} samples to {outdir}.")
def mix_heuristics_label_eval(df):
  """
  Tag evaluation rows that match exactly one heuristic (lexical / constituent /
  subsequence, each either supporting (+1) or contradicting (-1)) with a
  `mix_heurstic_label` column that also reports the OOD / in-distribution split,
  and return the concatenation of the six subsets (rows matching zero or
  multiple heuristics are dropped).

  Refactored from six copy-pasted blocks; `.copy()` avoids pandas'
  SettingWithCopyWarning that the original slice assignments triggered.
  Output rows, labels and ordering are unchanged.
  """
  heuristic_cols = ["lexical", "constituent", "subsequence"]
  labelled = []
  # Order matters: lexical, constituent, subsequence -- support before contradict,
  # matching the original concat order.
  for col in heuristic_cols:
    for sign, word in ((1, "support"), (-1, "contradict")):
      others = [c for c in heuristic_cols if c != col]
      mask = (df[col] == sign) & (df[others[0]] == 0) & (df[others[1]] == 0)
      subset = df.loc[mask].copy()
      n_ood = subset.loc[subset["ood"] == 1].shape[0]
      n_id = subset.loc[subset["ood"] == 0].shape[0]
      subset["mix_heurstic_label"] = f"{col} {word} (ood: {n_ood} id: {n_id})"
      labelled.append(subset)
  return pd.concat(labelled)
def mix_heuristics_label_train(df):
  """
  Train-split analogue of mix_heuristics_label_eval (no OOD split): tag rows
  matching exactly one heuristic with "<heuristic> <support|contradict> (<count>)"
  and return the concatenation of the six subsets.

  Fixes the original f-strings, which were missing the closing parenthesis
  (e.g. "lexical support (3"); also de-duplicates the six copy-pasted blocks
  and uses `.copy()` to avoid SettingWithCopyWarning.
  """
  heuristic_cols = ["lexical", "constituent", "subsequence"]
  labelled = []
  for col in heuristic_cols:
    for sign, word in ((1, "support"), (-1, "contradict")):
      others = [c for c in heuristic_cols if c != col]
      mask = (df[col] == sign) & (df[others[0]] == 0) & (df[others[1]] == 0)
      subset = df.loc[mask].copy()
      subset["mix_heurstic_label"] = f"{col} {word} ({subset.shape[0]})"
      labelled.append(subset)
  return pd.concat(labelled)
def get_ambiguous_heuristics_samples(df, model_dir, df_orig=None,
                                     heu='lexical'):
  """
  Dump the "ambiguous" examples (variability >= 0.3) that match heuristic `heu`
  to <model_dir>/unique_samples_csv/ambiguous_samples.csv, joined back to the
  original dataset rows for readability.

  Fix: `df_orig` previously defaulted to `pd.read_csv(<hard-coded path>)` in
  the signature, which executed the file read at *import* time and crashed the
  whole module whenever the file was absent. It is now loaded lazily, only
  when the caller relies on the default.
  """
  if df_orig is None:
    df_orig = pd.read_csv("/home/jusun/adila001/MNLI/train_heuristic.tsv", sep='\t|\n')
  df = df.loc[(df[heu] != 0)]
  df = df.loc[df["variability"] >= 0.3]
  # Join back to the original rows; drop bookkeeping columns.
  df_heuristics_ORIG = df_orig.loc[df['original_id'].tolist()]
  df_heuristics_ORIG = df_heuristics_ORIG.drop(['index', 'promptID', 'pairID'], axis=1)
  df_heuristics_ORIG['confidence'] = df['confidence'].tolist()
  df_heuristics_ORIG['variability'] = df['variability'].tolist()
  csv_dir = os.path.join(model_dir, 'unique_samples_csv')
  os.makedirs(csv_dir, exist_ok=True)
  df_heuristics_ORIG.to_csv(os.path.join(csv_dir, 'ambiguous_samples.csv'))
def get_sorted_samples(df, model_dir, df_orig=None,
                       n_sample=30,
                       decoded_label=["contradiction", "entailment", "neutral"],
                       columns_order=['index', 'genre', 'sentence1', 'sentence2', 'variability',
                                      'confidence', 'var_ep', 'conf_ep', 'gold_label', 'pred_label'],
                       mode="train"):
  """
  For each epoch, sort all examples by their per-epoch confidence (ascending)
  and write them, joined back to the original data, to
  <model_dir>/ANALYSIS_CLEAN/SORTED/<MODE>_CONF_ep_<ep>_SORTED.csv.

  Fix: `df_orig` previously defaulted to `pd.read_csv(<hard-coded path>)` in
  the signature, running file I/O at import time; it is now loaded lazily.
  `n_sample` is kept for backward compatibility but is unused by the live
  (confidence-sorted) code path.
  """
  if df_orig is None:
    df_orig = pd.read_csv('/home/jusun/adila001/MNLI/train_heuristic.tsv', sep='\t|\n')
  csv_dir = os.path.join(model_dir, 'ANALYSIS_CLEAN', 'SORTED')
  os.makedirs(csv_dir, exist_ok=True)
  # Metric columns hold one value per epoch, so the first row's length gives
  # the epoch count.
  ep_number = len(df['variability'].tolist()[0])
  for ep in range(ep_number):
    df_copy = df.copy()
    # Slice out this epoch's scalar metrics and decode the predicted label.
    df_copy['var_ep'] = np.asarray(df_copy['variability'].tolist())[:, ep]
    df_copy['conf_ep'] = np.asarray(df_copy['confidence'].tolist())[:, ep]
    df_copy['pred_label'] = np.asarray(df_copy['pred_label'].tolist())[:, ep]
    df_copy['pred_label'] = [decoded_label[pred] for pred in df_copy['pred_label']]
    top_n_conf = df_copy.sort_values(by=['conf_ep'])
    # Join back to the original dataset rows, preserving the sorted order.
    top_n_conf_ORIG = df_orig.loc[top_n_conf['original_id'].tolist()].copy()
    top_n_conf_ORIG['variability'] = top_n_conf['variability'].tolist()
    top_n_conf_ORIG['confidence'] = top_n_conf['confidence'].tolist()
    top_n_conf_ORIG['var_ep'] = top_n_conf['var_ep'].tolist()
    top_n_conf_ORIG['conf_ep'] = top_n_conf['conf_ep'].tolist()
    top_n_conf_ORIG['pred_label'] = top_n_conf['pred_label'].tolist()
    top_n_conf_ORIG = top_n_conf_ORIG[columns_order]
    prefix = mode.upper()
    top_n_conf_ORIG.to_csv(os.path.join(csv_dir, "{}_CONF_ep_{}_SORTED.csv".format(prefix, ep)))
def get_top_n_heuristics_samples(df, model_dir, df_orig=None,
                                 top_heuristic_obj = {'lexical': 20, 'constituent': 20, 'subsequence': 20},
                                 decoded_label=["contradiction", "entailment", "neutral"],
                                 columns_order = ['genre', 'sentence1', 'sentence2', 'variability',
                                                  'confidence', 'var_ep', 'conf_ep', 'gold_label', 'pred_label']):
  """
  For each epoch and each heuristic in `top_heuristic_obj`, select the N
  highest-confidence examples matching that heuristic and write them, joined
  back to the original data, to
  <model_dir>/heuristics_only_csv_EVAL/<heu>_TOP_CONF_ep_<ep>_LARGEST.csv.

  Fixes: `df_orig` previously defaulted to `pd.read_csv(<hard-coded path>)` in
  the signature, running file I/O at import time -- it is now lazy. The
  top-by-variability frame was also computed on every iteration although its
  only consumer (a to_csv call) was commented out; that dead work is removed.
  """
  if df_orig is None:
    df_orig = pd.read_csv('/home/jusun/adila001/MNLI/train_heuristic.tsv', sep='\t|\n')
  csv_dir = os.path.join(model_dir, 'heuristics_only_csv_EVAL')
  os.makedirs(csv_dir, exist_ok=True)
  ep_number = len(df['variability'].tolist()[0])
  for ep in range(ep_number):
    for heu in top_heuristic_obj:
      df_heuristic = df.loc[df[heu] != 0].copy()
      # Slice out this epoch's scalar metrics and decode the predicted label.
      df_heuristic['var_ep'] = np.asarray(df_heuristic['variability'].tolist())[:, ep]
      df_heuristic['conf_ep'] = np.asarray(df_heuristic['confidence'].tolist())[:, ep]
      df_heuristic['pred_label'] = np.asarray(df_heuristic['pred_label'].tolist())[:, ep]
      df_heuristic['pred_label'] = [decoded_label[pred] for pred in df_heuristic['pred_label']]
      top_n_conf = df_heuristic.nlargest(top_heuristic_obj[heu], 'conf_ep')
      # Join back to the original dataset rows, preserving the ranking order.
      top_n_conf_ORIG = df_orig.loc[top_n_conf['original_id'].tolist()].copy()
      top_n_conf_ORIG['variability'] = top_n_conf['variability'].tolist()
      top_n_conf_ORIG['confidence'] = top_n_conf['confidence'].tolist()
      top_n_conf_ORIG['var_ep'] = top_n_conf['var_ep'].tolist()
      top_n_conf_ORIG['conf_ep'] = top_n_conf['conf_ep'].tolist()
      top_n_conf_ORIG['pred_label'] = top_n_conf['pred_label'].tolist()
      top_n_conf_ORIG = top_n_conf_ORIG[columns_order]
      top_n_conf_ORIG.to_csv(os.path.join(csv_dir, "{}_TOP_CONF_ep_{}_LARGEST.csv".format(heu, ep)))
def find_max_var(var_arr):
    """Return the largest value in ``var_arr`` (used as a shared x-axis limit)."""
    return np.asarray(var_arr).max()
def plot_train_epochs(args, training_dynamics, heuristics, original_id, gif=True):
    """Plot per-epoch data maps (confidence vs. variability) for the training set.

    One plot series is produced per heuristic ('lexical', 'constituent',
    'subsequence'), plus one combined heuristics-only series; frames are
    optionally stitched into animated gifs when ``gif`` is True.
    """
    # Number of epochs is taken from the logits trajectory of any one example.
    total_epochs = len(list(training_dynamics.values())[0]["logits"])
    df, _ = compute_train_dy_metrics_per_epoch(training_dynamics, heuristics, original_id)
    train_dy_filename = os.path.join(args.model_dir, f"td_metrics.jsonl")
    df.to_json(train_dy_filename,
               orient='records',
               lines=True)
    logger.info(f"Metrics based on Training Dynamics written to {train_dy_filename}")
    # Split samples into those flagged by at least one heuristic vs. none.
    df_heuristics = df.loc[(df["lexical"] != 0) | (df["constituent"] != 0) | (df["subsequence"] != 0)]
    df_others = df.loc[(df["lexical"] == 0) & (df["constituent"] == 0) & (df["subsequence"] == 0)]
    max_instances_heuristic = {
        'lexical': df_heuristics.loc[df_heuristics['lexical'] != 0].shape[0],
        'subsequence': df_heuristics.loc[df_heuristics['subsequence'] != 0].shape[0],
        'constituent': df_heuristics.loc[df_heuristics['constituent'] != 0].shape[0]
    }
    heuristics = ['lexical', 'constituent', 'subsequence']
    # Shared x-axis limit so frames are visually comparable across epochs.
    max_var = find_max_var(df_heuristics['variability'].tolist())
    for heuristic in heuristics:
        figs = []
        # NOTE(review): max_instances_to_plot is no longer used — the sampling
        # line that consumed it is commented out below.
        max_instances_to_plot = max_instances_heuristic[heuristic]
        df_current_heuristic = df_heuristics.loc[df_heuristics[heuristic] != 0]
        # df_others_sampled = df_others.sample(n=max_instances_to_plot-df_current_heuristic.shape[0])
        # Mix in twice as many non-heuristic samples as heuristic ones.
        df_others_sampled = df_others.sample(n=df_current_heuristic.shape[0] * 2)
        df_current_heuristic = df_current_heuristic.append(df_others_sampled, ignore_index=True)
        # ### DEV ###
        # for ep in range(total_epochs):
        # ### DEV ###
        # Epochs 0-1 are skipped — presumably too noisy to plot; TODO confirm.
        for ep in range(2, total_epochs):
            df_current_heuristic_epoch = df_current_heuristic.copy()
            # Per-epoch columns: each metric cell holds a list indexed by epoch.
            confidence_epoch = np.asarray(df_current_heuristic_epoch['confidence'].tolist())[:, ep].flatten()
            var_epoch = np.asarray(df_current_heuristic_epoch['variability'].tolist())[:, ep].flatten()
            correctness_epoch = np.asarray(df_current_heuristic_epoch['correctness'].tolist())[:, ep].flatten()
            # NOTE(review): drop() is not in-place here, so its result is
            # discarded; harmless since the columns are overwritten below.
            df_current_heuristic_epoch.drop(['confidence', 'variability', 'correctness'], axis=1)
            df_current_heuristic_epoch['confidence'] = confidence_epoch
            df_current_heuristic_epoch['variability'] = var_epoch
            df_current_heuristic_epoch['correctness'] = correctness_epoch
            fig = plot_heuristics_mix(df_current_heuristic_epoch, os.path.join(args.plots_dir, 'train_plots'), hue_metric=heuristic,
                                      title='{}_epoch_{}'.format(heuristic, ep), max_var=max_var)
            figs.append(convert_fig_to_arr(fig))
        if gif:
            # NOTE(review): kwargs_write is unused; mimsave is called with fps only.
            kwargs_write = {'fps': 1.0, 'quantizer': 'nq'}
            gif_path = os.path.join(args.plots_dir, "train_plots", f'TRAIN_{ep}_epochs.gif')
            # gif_path = f'{args.plots_dir}/{heuristic}_{ep}_epochs.gif'
            imageio.mimsave(gif_path, figs, fps=1)
            logger.info(f"Aminated gif saved to {gif_path}")
    # Second pass: a single combined plot over all heuristic labels.
    df_heuristics_mix = mix_heuristics_label_train(df_heuristics)
    figs = []
    # ### DEV ###
    # for ep in range(total_epochs):
    # ### DEV ###
    # NOTE(review): df_others_sampled is computed but the concat line that
    # used it is commented out below.
    df_others_sampled = df_others.sample(n=df_heuristics_mix.shape[0] * 2)
    for ep in range(2, total_epochs):
        df_heuristic_mix_epoch = df_heuristics_mix.copy()
        confidence_epoch = np.asarray(df_heuristic_mix_epoch['confidence'].tolist())[:, ep].flatten()
        var_epoch = np.asarray(df_heuristic_mix_epoch['variability'].tolist())[:, ep].flatten()
        correctness_epoch = np.asarray(df_heuristic_mix_epoch['correctness'].tolist())[:, ep].flatten()
        df_heuristic_mix_epoch.drop(['confidence', 'variability', 'correctness'], axis=1)
        df_heuristic_mix_epoch['confidence'] = confidence_epoch
        df_heuristic_mix_epoch['variability'] = var_epoch
        df_heuristic_mix_epoch['correctness'] = correctness_epoch
        # df_heuristic_mix_epoch = pd.concat([df_others_sampled, df_heuristic_mix_epoch])
        fig = plot_heuristics_only(df_heuristic_mix_epoch, os.path.join(args.plots_dir, "train_plots"), title=f'HEURISTICS_ONLY_{ep}', max_var=max_var)
        figs.append(convert_fig_to_arr(fig))
    if gif:
        kwargs_write = {'fps': 1.0, 'quantizer': 'nq'}
        gif_path = os.path.join(args.plots_dir, "train_plots", f'TRAIN_{ep}_epochs_ALL.gif')
        # gif_path = f'{args.plots_dir}/HEURISTICS_ONLY_{ep}_epochs.gif'
        imageio.mimsave(gif_path, figs, fps=1)
        logger.info(f"Aminated gif saved to {gif_path}")
def plot_eval_epochs(args, id_obj, ood_obj, gif=True):
    """Plot per-epoch data maps for in-distribution vs. out-of-distribution eval sets.

    ``id_obj`` / ``ood_obj`` are 4-tuples of (dynamics, heuristics,
    original_idx, predictions). Frames are optionally stitched into an
    animated gif when ``gif`` is True.
    """
    id_dynamics, id_heuristics, id_original_idx, id_pred = id_obj[0], id_obj[1], id_obj[2], id_obj[3]
    ood_dynamics, ood_heuristics, ood_original_idx, ood_pred = ood_obj[0], ood_obj[1], ood_obj[2], ood_obj[3]
    total_epochs = len(list(id_dynamics.values())[0]["logits"])
    df_id, _ = compute_train_dy_metrics_per_epoch(id_dynamics, id_heuristics, id_original_idx, mode="eval")
    df_ood, _ = compute_train_dy_metrics_per_epoch(ood_dynamics, ood_heuristics, ood_original_idx, mode="eval")
    # Tag rows so the two eval sets can be told apart after concatenation.
    df_ood['ood'] = 1
    df_id['ood'] = 0
    id_dy_filename = os.path.join(args.model_dir, f"iid_metrics.jsonl")
    df_id.to_json(id_dy_filename,
                  orient='records',
                  lines=True)
    ood_dy_filename = os.path.join(args.model_dir, f"ood_metrics.jsonl")
    df_ood.to_json(ood_dy_filename,
                   orient='records',
                   lines=True)
    logger.info(f"Metrics based on Eval Dynamics written to {id_dy_filename} and {ood_dy_filename}")
    df = pd.concat([df_id, df_ood])
    # Shared x-axis limit across all frames.
    max_var = find_max_var(df['variability'].tolist())
    df_heuristics = df.loc[(df["lexical"] != 0) | (df["constituent"] != 0) | (df["subsequence"] != 0)]
    df_ood = df.loc[(df["ood"] != 0)]
    # Samples of interest: heuristic-flagged plus all OOD rows.
    df_concern = pd.concat([df_heuristics, df_ood])
    df_concern = mix_heuristics_label_eval(df_concern)
    df_others = df.loc[(df["lexical"] == 0) & (df["constituent"] == 0) & (df["subsequence"] == 0) & (df["ood"] == 0)]
    print(df_others.shape)
    print(df_ood.shape)
    # Sample as many "plain" rows as there are OOD rows (capped by availability).
    df_others_sample = df_others.sample(n=int(np.ceil(df_ood.shape[0])) if df_ood.shape[0] < df_others.shape[0] else df_others.shape[0])
    df = pd.concat([df_concern, df_others_sample])
    df = df.fillna("no heuristic")
    print(df_heuristics.shape[0], df_others_sample.shape[0], df_ood.shape[0])
    figs = []
    # NOTE(review): this palette iterator is never passed to the plot call
    # below — plot_heuristics_only is invoked without a palette argument.
    palette = iter(sns.husl_palette(len(np.unique(df["mix_heurstic_label"].tolist())) + 1))
    # ### DEV ###
    # for ep in range(total_epochs):
    # ### DEV ###
    # Epochs 0-1 are skipped — presumably too noisy to plot; TODO confirm.
    for ep in range(2, total_epochs):
        df_heuristic_mix_epoch = df.copy()
        # Per-epoch columns: each metric cell holds a list indexed by epoch.
        confidence_epoch = np.asarray(df_heuristic_mix_epoch['confidence'].tolist())[:, ep].flatten()
        var_epoch = np.asarray(df_heuristic_mix_epoch['variability'].tolist())[:, ep].flatten()
        correctness_epoch = np.asarray(df_heuristic_mix_epoch['correctness'].tolist())[:, ep].flatten()
        # NOTE(review): drop() is not in-place, so its result is discarded;
        # harmless since the columns are overwritten below.
        df_heuristic_mix_epoch.drop(['confidence', 'variability', 'correctness'], axis=1)
        df_heuristic_mix_epoch['confidence'] = confidence_epoch
        df_heuristic_mix_epoch['variability'] = var_epoch
        df_heuristic_mix_epoch['correctness'] = correctness_epoch
        fig = plot_heuristics_only(df_heuristic_mix_epoch, os.path.join(args.plots_dir, "eval_plots"), title=f'EVAL_{ep}',
                                   max_var=max_var, style="ood")
        figs.append(convert_fig_to_arr(fig))
    if gif:
        # NOTE(review): kwargs_write is unused; mimsave is called with fps only.
        kwargs_write = {'fps': 1.0, 'quantizer': 'nq'}
        gif_path = os.path.join(args.plots_dir, "eval_plots", f'EVAL_{ep}_epochs.gif')
        imageio.mimsave(gif_path, figs, fps=1)
        logger.info(f"Aminated gif saved to {gif_path}")
def convert_fig_to_arr(fig):
    """Render a Matplotlib figure and return its RGB pixels as a numpy array.

    Returns an array of shape (height, width, 3), dtype uint8.
    """
    canvas = fig.canvas
    canvas.draw()  # force a render so the RGB buffer is populated
    width, height = canvas.get_width_height()
    flat = np.frombuffer(canvas.tostring_rgb(), dtype=np.uint8)
    return flat.reshape((height, width, 3))
def compute_train_dy_metrics(training_dynamics, heuristics, original_id, burn_out):
    """Compute per-example Dataset Cartography metrics from training dynamics.

    For every example (guid) this derives, over its per-epoch logits:
    confidence (mean gold-class probability), variability (std of the same),
    correctness count, forgetfulness, and threshold closeness, plus the
    example's heuristic flags and original id. Epochs beyond ``burn_out``
    are ignored when burn_out < total epochs.

    Returns:
        (df, df_train) — per-example metrics DataFrame and a per-epoch
        DataFrame with mean loss and train accuracy.
    """
    confidence_ = {}
    variability_ = {}
    threshold_closeness_ = {}
    correctness_ = {}
    forgetfulness_ = {}
    lexical = {}
    constituent = {}
    subsequence = {}
    original_ids = {}
    # Functions to be applied to the data.
    variability_func = lambda conf: np.std(conf)
    threshold_closeness_func = lambda conf: conf * (1 - conf)
    loss = torch.nn.CrossEntropyLoss()
    # Number of epochs is taken from any one example's logits trajectory.
    num_tot_epochs = len(list(training_dynamics.values())[0]["logits"])
    logger.info(f"Computing training dynamics across {num_tot_epochs} epochs")
    logger.info("Metrics computed: confidence, variability, correctness, forgetfulness, threshold_closeness")
    # Per-epoch accumulators across all examples (for loss/accuracy).
    logits = {i: [] for i in range(num_tot_epochs)}
    targets = {i: [] for i in range(num_tot_epochs)}
    training_accuracy = defaultdict(float)
    for guid in tqdm.tqdm(training_dynamics):
        correctness_trend = []
        true_probs_trend = []
        record = training_dynamics[guid]
        for i, epoch_logits in enumerate(record["logits"]):
            # Guard against records with more epochs than expected.
            if i >= len(logits.keys()):
                break
            probs = torch.nn.functional.softmax(torch.Tensor(epoch_logits), dim=-1)
            true_class_prob = float(probs[record["gold"]])
            true_probs_trend.append(true_class_prob)
            prediction = np.argmax(epoch_logits)
            is_correct = (prediction == record["gold"]).item()
            correctness_trend.append(is_correct)
            training_accuracy[i] += is_correct
            logits[i].append(epoch_logits)
            targets[i].append(record["gold"])
        # Truncate trends to the burn-out horizon when one is set.
        if burn_out < num_tot_epochs:
            correctness_trend = correctness_trend[:burn_out]
            true_probs_trend = true_probs_trend[:burn_out]
        correctness_[guid] = compute_correctness(correctness_trend)
        confidence_[guid] = np.mean(true_probs_trend)
        variability_[guid] = variability_func(true_probs_trend)
        forgetfulness_[guid] = compute_forgetfulness(correctness_trend)
        threshold_closeness_[guid] = threshold_closeness_func(confidence_[guid])
        lexical[guid] = heuristics[guid]["lexical"]
        constituent[guid] = heuristics[guid]["constituent"]
        subsequence[guid] = heuristics[guid]["subsequence"]
        original_ids[guid] = original_id[guid]
    # Should not affect ranking, so ignoring.
    epsilon_var = np.mean(list(variability_.values()))
    column_names = ['guid',
                    'index',
                    'threshold_closeness',
                    'confidence',
                    'variability',
                    'correctness',
                    'forgetfulness', 'lexical', 'constituent', 'subsequence', 'original_id']
    df = pd.DataFrame([[guid,
                       i,
                       threshold_closeness_[guid],
                       confidence_[guid],
                       variability_[guid],
                       correctness_[guid],
                       forgetfulness_[guid],
                       lexical[guid],
                       constituent[guid],
                       subsequence[guid],
                       original_ids[guid]
                       ] for i, guid in enumerate(correctness_)], columns=column_names)
    # Per-epoch mean loss and accuracy over the whole training set.
    df_train = pd.DataFrame([[i,
                             loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(
                                 training_dynamics),
                             training_accuracy[i] / len(training_dynamics)
                             ] for i in range(num_tot_epochs)],
                            columns=['epoch', 'loss', 'train_acc'])
    return df, df_train
def plot_heuristics_only(
        df: pd.DataFrame,
        plot_dir: os.path,
        title: str = '', save=True, max_var=0.5, style=None, palette=None):
    """Scatter confidence vs. variability, colored by mixed heuristic label.

    Expects ``df`` to carry 'variability', 'confidence' and
    'mix_heurstic_label' columns. The figure is optionally saved to
    ``plot_dir`` as '<title>.png' and is always returned.

    Fix: the final log line was a literal f-string with no placeholder
    ("Plot saved to (unknown)"); it now reports the actual output file.
    The unused `pal` palette (its consumer was commented out) is removed.
    """
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    main_metric = 'variability'
    other_metric = 'confidence'
    hue = "mix_heurstic_label"
    num_hues = len(df[hue].unique().tolist())
    fig, ax0 = plt.subplots(1, 1, figsize=(12, 10))
    plot = sns.scatterplot(x=main_metric,
                           y=other_metric,
                           ax=ax0,
                           data=df,
                           hue=hue,
                           s=30,
                           style=style,
                           marker='o' if style is not None else None,
                           palette=palette if palette else "tab10"
                           )
    # Annotate the three canonical data-map regions.
    bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
    func_annotate = lambda text, xyc, bbc: ax0.annotate(text,
                                                        xy=xyc,
                                                        xycoords="axes fraction",
                                                        fontsize=15,
                                                        color='black',
                                                        va="center",
                                                        ha="center",
                                                        rotation=350,
                                                        bbox=bb(bbc))
    an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
    an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
    an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
    plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right', fontsize='small')
    plot.set_xlabel('variability')
    plot.set_ylabel('confidence')
    plot.set_title(title)
    ax0.set_xlim(0, max_var)
    ax0.set_ylim(0, 1)
    fig.tight_layout()
    filename = f'{plot_dir}/{title}.png'
    if save:
        fig.savefig(filename, dpi=300)
        logger.info(f"Plot saved to {filename}")
    return fig
def plot_heuristics_mix(
        df: pd.DataFrame,
        plot_dir: os.path,
        hue_metric: str = 'lexical',
        title: str = '',
        save=True,
        max_var=0.5):
    """Scatter confidence vs. variability, colored by one heuristic column.

    The figure is optionally saved to ``plot_dir`` as '<title>.png' and is
    always returned.

    Fix: the final log line was a literal f-string with no placeholder
    ("Plot saved to (unknown)"); it now reports the actual output file.
    """
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    # Normalize correctness to a value between 0 and 1.
    # NOTE(review): `dataframe` (and its 'correct.' column) is not used by the
    # scatterplot below, which plots `df` directly — kept for parity with the
    # original behavior; confirm whether plotting `dataframe` was intended.
    dataframe = df.assign(corr_frac=lambda d: d.correctness / d.correctness.max())
    dataframe['correct.'] = [f"{x:.1f}" for x in dataframe['corr_frac']]
    main_metric = 'variability'
    other_metric = 'confidence'
    hue = hue_metric
    num_hues = len(dataframe[hue].unique().tolist())
    # Marker styles only when there are few hue levels, otherwise the legend
    # becomes unreadable.
    style = hue_metric if num_hues < 8 else None
    fig, ax0 = plt.subplots(1, 1, figsize=(8, 6))
    # Make the scatterplot with a diverging palette over the hue levels.
    pal = sns.diverging_palette(260, 15, n=num_hues, sep=10, center="dark")
    plot = sns.scatterplot(x=main_metric,
                           y=other_metric,
                           ax=ax0,
                           data=df,
                           hue=hue,
                           palette=pal,
                           style=style,
                           s=30)
    # Annotate the three canonical data-map regions.
    bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
    func_annotate = lambda text, xyc, bbc: ax0.annotate(text,
                                                        xy=xyc,
                                                        xycoords="axes fraction",
                                                        fontsize=15,
                                                        color='black',
                                                        va="center",
                                                        ha="center",
                                                        rotation=350,
                                                        bbox=bb(bbc))
    an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
    an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
    an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
    plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right')
    plot.set_xlabel('variability')
    plot.set_ylabel('confidence')
    plot.set_title(title)
    ax0.set_xlim(0, max_var)
    ax0.set_ylim(0, 1)
    fig.tight_layout()
    filename = f'{plot_dir}/{title}.png'
    if save:
        fig.savefig(filename, dpi=300)
        logger.info(f"Plot saved to {filename}")
    return fig
def plot_data_map(dataframe: pd.DataFrame,
                  plot_dir: os.path,
                  hue_metric: str = 'correct.',
                  title: str = '',
                  model: str = 'RoBERTa',
                  show_hist: bool = False,
                  max_instances_to_plot=55000):
    """Plot a Dataset Cartography data map (confidence vs. variability).

    Subsamples the data to at most ``max_instances_to_plot`` rows, colors
    points by ``hue_metric`` and, when ``show_hist`` is True, adds marginal
    histograms of confidence, variability and correctness. Saves the figure
    under ``plot_dir`` (pdf) or 'figures/' (png).

    Fix: the final log line was a literal f-string with no placeholder
    ("Plot saved to (unknown)"); it now reports the actual output file.
    """
    # Set style.
    sns.set(style='whitegrid', font_scale=1.6, font='Georgia', context='paper')
    logger.info(f"Plotting figure for {title} using the {model} model ...")
    # Subsample data to plot, so the plot is not too busy.
    dataframe = dataframe.sample(n=max_instances_to_plot if dataframe.shape[0] > max_instances_to_plot else len(dataframe))
    # Normalize correctness to a value between 0 and 1.
    dataframe = dataframe.assign(corr_frac=lambda d: d.correctness / d.correctness.max())
    dataframe['correct.'] = [f"{x:.1f}" for x in dataframe['corr_frac']]
    main_metric = 'variability'
    other_metric = 'confidence'
    hue = hue_metric
    num_hues = len(dataframe[hue].unique().tolist())
    # Marker styles only when there are few hue levels, otherwise the legend
    # becomes unreadable.
    style = hue_metric if num_hues < 8 else None
    if not show_hist:
        fig, ax0 = plt.subplots(1, 1, figsize=(8, 6))
    else:
        fig = plt.figure(figsize=(14, 10), )
        gs = fig.add_gridspec(3, 2, width_ratios=[5, 1])
        ax0 = fig.add_subplot(gs[:, 0])
    # Make the scatterplot with a diverging palette over the hue levels.
    pal = sns.diverging_palette(260, 15, n=num_hues, sep=10, center="dark")
    plot = sns.scatterplot(x=main_metric,
                           y=other_metric,
                           ax=ax0,
                           data=dataframe,
                           hue=hue,
                           palette=pal,
                           style=style,
                           s=30)
    # Annotate the three canonical data-map regions.
    bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
    func_annotate = lambda text, xyc, bbc: ax0.annotate(text,
                                                        xy=xyc,
                                                        xycoords="axes fraction",
                                                        fontsize=15,
                                                        color='black',
                                                        va="center",
                                                        ha="center",
                                                        rotation=350,
                                                        bbox=bb(bbc))
    an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
    an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
    an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
    if not show_hist:
        plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right')
    else:
        plot.legend(fancybox=True, shadow=True, ncol=1)
    plot.set_xlabel('variability')
    plot.set_ylabel('confidence')
    if show_hist:
        plot.set_title(f"{title}-{model} Data Map", fontsize=17)
        # Make the histograms.
        ax1 = fig.add_subplot(gs[0, 1])
        ax2 = fig.add_subplot(gs[1, 1])
        ax3 = fig.add_subplot(gs[2, 1])
        plott0 = dataframe.hist(column=['confidence'], ax=ax1, color='#622a87')
        plott0[0].set_title('')
        plott0[0].set_xlabel('confidence')
        plott0[0].set_ylabel('density')
        plott1 = dataframe.hist(column=['variability'], ax=ax2, color='teal')
        plott1[0].set_title('')
        plott1[0].set_xlabel('variability')
        plott1[0].set_ylabel('density')
        plot2 = sns.countplot(x="correct.", data=dataframe, ax=ax3, color='#86bf91')
        ax3.xaxis.grid(True)  # Show the vertical gridlines
        plot2.set_title('')
        plot2.set_xlabel('correctness')
        plot2.set_ylabel('density')
    fig.tight_layout()
    filename = f'{plot_dir}/{title}_{model}.pdf' if show_hist else f'figures/compact_{title}_{model}.png'
    print('PLOT ORIGINAL', filename)
    fig.savefig(filename, dpi=300)
    logger.info(f"Plot saved to {filename}")
if __name__ == "__main__":
    # CLI entry point: parse flags, then plot train and/or eval data maps.
    parser = argparse.ArgumentParser()
    parser.add_argument("--filter",
                        action="store_true",
                        help="Whether to filter data subsets based on specified `metric`.")
    parser.add_argument("--plot_train",
                        action="store_true",
                        help="Whether to plot train data maps and save as `png`.")
    parser.add_argument("--plot_eval",
                        action="store_true",
                        help="Whether to plot eval data maps and save as `png`.")
    parser.add_argument("--model_dir",
                        "-o",
                        required=True,
                        type=os.path.abspath,
                        help="Directory where model training dynamics stats reside.")
    parser.add_argument("--data_dir",
                        "-d",
                        default="/Users/swabhas/data/glue/WINOGRANDE/xl/",
                        type=os.path.abspath,
                        help="Directory where data for task resides.")
    parser.add_argument("--plots_dir",
                        default="./cartography/",
                        type=os.path.abspath,
                        help="Directory where plots are to be saved.")
    parser.add_argument("--task_name",
                        "-t",
                        default="WINOGRANDE",
                        choices=("SNLI", "MNLI", "QNLI", "WINOGRANDE", "RTE", "WNLI"),
                        help="Which task are we plotting or filtering for.")
    parser.add_argument('--metric',
                        choices=('threshold_closeness',
                                 'confidence',
                                 'variability',
                                 'correctness',
                                 'forgetfulness'),
                        help="Metric to filter data by.",)
    parser.add_argument("--include_ci",
                        action="store_true",
                        help="Compute the confidence interval for variability.")
    parser.add_argument("--filtering_output_dir",
                        "-f",
                        default="./filtered/",
                        type=os.path.abspath,
                        help="Output directory where filtered datasets are to be written.")
    parser.add_argument("--worst",
                        action="store_true",
                        help="Select from the opposite end of the spectrum acc. to metric,"
                             "for baselines")
    parser.add_argument("--both_ends",
                        action="store_true",
                        help="Select from both ends of the spectrum acc. to metric,")
    parser.add_argument("--burn_out",
                        type=int,
                        default=100,
                        help="# Epochs for which to compute train dynamics.")
    parser.add_argument("--model",
                        default="RoBERTa",
                        help="Model for which data map is being plotted")
    parser.add_argument("--plot_gif",
                        action="store_true",
                        help="Whether to plot gif or not")
    args = parser.parse_args()
    # Legacy gif-only / filtering paths kept below for reference.
    # if args.plot_gif:
    #     assert len(os.listdir(args.plots_dir)) > 0
    #     plot_gif(args.plots_dir)
    #     exit()
    # total_epochs = len(list(training_dynamics.values())[0]["logits"])
    # if args.burn_out > total_epochs:
    #     args.burn_out = total_epochs
    #     logger.info(f"Total epochs found: {args.burn_out}")
    # train_dy_metrics, _ = compute_train_dy_metrics(training_dynamics, heuristics, original_id, args.burn_out)
    #
    # burn_out_str = f"_{args.burn_out}" if args.burn_out > total_epochs else ""
    # train_dy_filename = os.path.join(args.model_dir, f"td_metrics{burn_out_str}.jsonl")
    # train_dy_metrics.to_json(train_dy_filename,
    #                          orient='records',
    #                          lines=True)
    # logger.info(f"Metrics based on Training Dynamics written to {train_dy_filename}")
    # if args.filter:
    #     assert args.filtering_output_dir
    #     if not os.path.exists(args.filtering_output_dir):
    #         os.makedirs(args.filtering_output_dir)
    #     assert args.metric
    #     write_filtered_data(args, train_dy_metrics)
    assert args.plots_dir
    # NOTE(review): the --plots_dir flag is overridden here — plots always go
    # under <model_dir>/plots. Confirm whether the flag should win instead.
    args.plots_dir = os.path.join(args.model_dir, "plots")
    if not os.path.exists(args.plots_dir):
        os.makedirs(args.plots_dir)
    if args.plot_train:
        # plot_data_map(train_dy_metrics, args.plots_dir, title=args.task_name, show_hist=True, model=args.model)
        # plot_heuristics_mix(args, train_dy_metrics, args.plots_dir, title=args.task_name)
        # plot_heuristics_only(args, train_dy_metrics, args.plots_dir, title=args.task_name)
        # get_ambiguous_heuristics_samples(train_dy_metrics, args.model_dir)
        # get_top_n_heuristics_samples(train_dy_metrics, args.model_dir)
        # Load recorded per-epoch training dynamics; QNLI ids need their last
        # character stripped.
        training_dynamics, heuristics, original_id, pred_labels = read_dynamics(args.model_dir,
                                                                                strip_last=True if args.task_name in [
                                                                                    "QNLI"] else False,
                                                                                burn_out=args.burn_out if args.burn_out < 100 else None)
        df_train, _ = compute_train_dy_metrics_per_epoch(training_dynamics, heuristics, original_id, mode="eval")
        # plot_train_epochs(args, training_dynamics, heuristics, original_id, gif=True)
        # NOTE(review): the heuristic TSV path is hard-coded to one user's home
        # directory — consider deriving it from --data_dir.
        get_sorted_samples(df_train, args.model_dir,
                           pd.read_csv('/home/jusun/adila001/RTE/train_heuristic.tsv',
                                       sep='\t|\n'),
                           decoded_label=["entailment", "not_entailment"],
                           columns_order=['index', 'sentence1', 'sentence2', 'variability',
                                          'confidence', 'var_ep', 'conf_ep', 'lexical',
                                          'subsequence',
                                          'gold_label', 'pred_label'])
        # get_top_n_heuristics_samples(df_train, args.model_dir,
        #                              pd.read_csv('/home/jusun/adila001/RTE/train_heuristic.tsv',
        #                                          sep='\t|\n'),
        #                              columns_order=['sentence1', 'sentence2', 'variability',
        #                                             'confidence', 'var_ep', 'conf_ep', 'lexical',
        #                                             'subsequence',
        #                                             'gold_label', 'pred_label'],
        #                              decoded_label=["entailment", "not_entailment"],
        #                              top_heuristic_obj={'lexical': 10})
    if args.plot_eval:
        # get_ambiguous_heuristics_samples(train_dy_metrics, args.model_dir)
        # Load eval dynamics for the in-distribution and OOD splits separately.
        eval_ID_dynamics, heuristics_ID, original_id_ID, pred_labels_ID = read_dynamics(args.model_dir,
                                                                                        strip_last=True if args.task_name in [
                                                                                            "QNLI"] else False,
                                                                                        burn_out=args.burn_out if args.burn_out < 100 else None, mode="eval_ID")
        eval_OOD_dynamics, heuristics_OOD, original_id_OOD, pred_labels_OOD = read_dynamics(args.model_dir,
                                                                                            strip_last=True if args.task_name in [
                                                                                                "QNLI"] else False,
                                                                                            burn_out=args.burn_out if args.burn_out < 100 else None, mode="eval_OOD")
        df_id, _ = compute_train_dy_metrics_per_epoch(eval_ID_dynamics, heuristics_ID, original_id_ID, mode="in_dist")
        df_ood, _ = compute_train_dy_metrics_per_epoch(eval_OOD_dynamics, heuristics_OOD, original_id_OOD, mode="ood")
        # get_top_n_heuristics_samples(df_id, args.model_dir,
        #                              pd.read_csv('/home/jusun/adila001/MNLI/dev_matched_heuristic.tsv', sep='\t|\n'),
        #                              # decoded_label=["entailment", "not_entailment"],
        #                              columns_order=['sentence1', 'sentence2', 'variability',
        #                                             'confidence', 'var_ep', 'conf_ep', 'lexical', 'constituent',
        #                                             'subsequence',
        #                                             'gold_label', 'pred_label'],
        #                              top_heuristic_obj={'lexical': 30})
        # # df_ood['ood'] = 1
        # get_top_n_heuristics_samples(df_ood, args.model_dir,
        #                              pd.read_csv('/home/jusun/adila001/WNLI/train_heuristic.tsv', sep='\t|\n'),
        #                              decoded_label=["not_entailment", "entailment"],
        #                              columns_order=['sentence1', 'sentence2', 'variability',
        #                                             'confidence', 'var_ep', 'conf_ep', 'lexical', 'subsequence',
        #                                             'gold_label', 'pred_label'],
        #                              top_heuristic_obj={'lexical':30})
        get_sorted_samples(df_id, args.model_dir,
                           pd.read_csv('/home/jusun/adila001/RTE/dev_heuristic.tsv',
                                       sep='\t|\n'),
                           decoded_label=["entailment", "not_entailment"],
                           columns_order=['index', 'sentence1', 'sentence2', 'variability',
                                          'confidence', 'var_ep', 'conf_ep', 'lexical',
                                          'subsequence',
                                          'gold_label', 'pred_label'], mode='in_dist')
        # Mark OOD rows so downstream plots can distinguish the two splits.
        df_ood['ood'] = 1
        get_sorted_samples(df_ood, args.model_dir,
                           pd.read_csv('/home/jusun/adila001/WNLI/train_heuristic.tsv',
                                       sep='\t|\n'),
                           decoded_label=["not_entailment", "entailment"],
                           columns_order=['index', 'sentence1', 'sentence2', 'variability',
                                          'confidence', 'var_ep', 'conf_ep', 'lexical',
                                          'subsequence',
                                          'gold_label', 'pred_label'], mode='ood')
        # print(id_conf)
        # print(ood_conf)
        plot_eval_epochs(args, [eval_ID_dynamics, heuristics_ID, original_id_ID, pred_labels_ID],
                         [eval_OOD_dynamics, heuristics_OOD, original_id_OOD, pred_labels_OOD], gif=True)
| [
"logging.getLogger",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.LongTensor",
"seaborn.scatterplot",
"os.path.exists",
"seaborn.set",
"numpy.mean",
"argparse.ArgumentParser",
"json.dumps",
"pandas.concat",
"numpy.ceil",
"seaborn.diverging_palette",
"torch.Tensor",
"numpy.argma... | [((923, 1030), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)\n", (942, 1030), False, 'import logging\n'), ((1039, 1066), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1056, 1066), False, 'import logging\n'), ((3331, 3358), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3356, 3358), False, 'import torch\n'), ((3881, 3899), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (3892, 3899), False, 'from collections import defaultdict\n'), ((3915, 3943), 'tqdm.tqdm', 'tqdm.tqdm', (['training_dynamics'], {}), '(training_dynamics)\n', (3924, 3943), False, 'import tqdm\n'), ((9799, 9873), 'cartography.data_utils.read_data', 'read_data', (['original_train_file'], {'task_name': 'args.task_name', 'guid_as_int': '(True)'}), '(original_train_file, task_name=args.task_name, guid_as_int=True)\n', (9808, 9873), False, 'from cartography.data_utils import read_data, read_jsonl, copy_dev_test\n'), ((13604, 13701), 'pandas.concat', 'pd.concat', (['[df_lex_supp, df_lex_cont, df_const_supp, df_const_cont, df_sub_supp,\n df_sub_cont]'], {}), '([df_lex_supp, df_lex_cont, df_const_supp, df_const_cont,\n df_sub_supp, df_sub_cont])\n', (13613, 13701), True, 'import pandas as pd\n'), ((14928, 15025), 'pandas.concat', 'pd.concat', (['[df_lex_supp, df_lex_cont, df_const_supp, df_const_cont, df_sub_supp,\n df_sub_cont]'], {}), '([df_lex_supp, df_lex_cont, df_const_supp, df_const_cont,\n df_sub_supp, df_sub_cont])\n', (14937, 15025), True, 'import pandas as pd\n'), ((15121, 15194), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jusun/adila001/MNLI/train_heuristic.tsv"""'], {'sep': '"""\t|\n"""'}), "('/home/jusun/adila001/MNLI/train_heuristic.tsv', sep='\\t|\\n')\n", (15132, 15194), True, 'import pandas as 
pd\n'), ((15625, 15670), 'os.path.join', 'os.path.join', (['model_dir', '"""unique_samples_csv"""'], {}), "(model_dir, 'unique_samples_csv')\n", (15637, 15670), False, 'import os\n'), ((15862, 15935), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jusun/adila001/MNLI/train_heuristic.tsv"""'], {'sep': '"""\t|\n"""'}), "('/home/jusun/adila001/MNLI/train_heuristic.tsv', sep='\\t|\\n')\n", (15873, 15935), True, 'import pandas as pd\n'), ((16326, 16377), 'os.path.join', 'os.path.join', (['model_dir', '"""ANALYSIS_CLEAN"""', '"""SORTED"""'], {}), "(model_dir, 'ANALYSIS_CLEAN', 'SORTED')\n", (16338, 16377), False, 'import os\n'), ((18727, 18800), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jusun/adila001/MNLI/train_heuristic.tsv"""'], {'sep': '"""\t|\n"""'}), "('/home/jusun/adila001/MNLI/train_heuristic.tsv', sep='\\t|\\n')\n", (18738, 18800), True, 'import pandas as pd\n'), ((19230, 19281), 'os.path.join', 'os.path.join', (['model_dir', '"""heuristics_only_csv_EVAL"""'], {}), "(model_dir, 'heuristics_only_csv_EVAL')\n", (19242, 19281), False, 'import os\n'), ((22605, 22621), 'numpy.amax', 'np.amax', (['var_arr'], {}), '(var_arr)\n', (22612, 22621), True, 'import numpy as np\n'), ((22892, 22941), 'os.path.join', 'os.path.join', (['args.model_dir', 'f"""td_metrics.jsonl"""'], {}), "(args.model_dir, f'td_metrics.jsonl')\n", (22904, 22941), False, 'import os\n'), ((27906, 27956), 'os.path.join', 'os.path.join', (['args.model_dir', 'f"""iid_metrics.jsonl"""'], {}), "(args.model_dir, f'iid_metrics.jsonl')\n", (27918, 27956), False, 'import os\n'), ((28073, 28123), 'os.path.join', 'os.path.join', (['args.model_dir', 'f"""ood_metrics.jsonl"""'], {}), "(args.model_dir, f'ood_metrics.jsonl')\n", (28085, 28123), False, 'import os\n'), ((28337, 28363), 'pandas.concat', 'pd.concat', (['[df_id, df_ood]'], {}), '([df_id, df_ood])\n', (28346, 28363), True, 'import pandas as pd\n'), ((28579, 28613), 'pandas.concat', 'pd.concat', (['[df_heuristics, df_ood]'], {}), '([df_heuristics, 
df_ood])\n', (28588, 28613), True, 'import pandas as pd\n'), ((28988, 29029), 'pandas.concat', 'pd.concat', (['[df_concern, df_others_sample]'], {}), '([df_concern, df_others_sample])\n', (28997, 29029), True, 'import pandas as pd\n'), ((31171, 31198), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (31196, 31198), False, 'import torch\n'), ((31591, 31609), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (31602, 31609), False, 'from collections import defaultdict\n'), ((31627, 31655), 'tqdm.tqdm', 'tqdm.tqdm', (['training_dynamics'], {}), '(training_dynamics)\n', (31636, 31655), False, 'import tqdm\n'), ((34877, 34913), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 10)'}), '(1, 1, figsize=(12, 10))\n', (34889, 34913), True, 'import matplotlib.pyplot as plt\n'), ((34948, 35013), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(260)', '(15)'], {'n': 'num_hues', 'sep': '(10)', 'center': '"""dark"""'}), "(260, 15, n=num_hues, sep=10, center='dark')\n", (34969, 35013), True, 'import seaborn as sns\n'), ((35025, 35210), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'main_metric', 'y': 'other_metric', 'ax': 'ax0', 'data': 'df', 'hue': 'hue', 's': '(30)', 'style': 'style', 'marker': "('o' if style is not None else None)", 'palette': "(palette if palette else 'tab10')"}), "(x=main_metric, y=other_metric, ax=ax0, data=df, hue=hue, s=\n 30, style=style, marker='o' if style is not None else None, palette=\n palette if palette else 'tab10')\n", (35040, 35210), True, 'import seaborn as sns\n'), ((37734, 37768), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (37746, 37768), True, 'import matplotlib.pyplot as plt\n'), ((37832, 37897), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(260)', '(15)'], {'n': 'num_hues', 'sep': '(10)', 'center': '"""dark"""'}), "(260, 15, n=num_hues, sep=10, 
center='dark')\n", (37853, 37897), True, 'import seaborn as sns\n'), ((37910, 38018), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'main_metric', 'y': 'other_metric', 'ax': 'ax0', 'data': 'df', 'hue': 'hue', 'palette': 'pal', 'style': 'style', 's': '(30)'}), '(x=main_metric, y=other_metric, ax=ax0, data=df, hue=hue,\n palette=pal, style=style, s=30)\n', (37925, 38018), True, 'import seaborn as sns\n'), ((39882, 39957), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'font_scale': '(1.6)', 'font': '"""Georgia"""', 'context': '"""paper"""'}), "(style='whitegrid', font_scale=1.6, font='Georgia', context='paper')\n", (39889, 39957), True, 'import seaborn as sns\n'), ((40921, 40986), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(260)', '(15)'], {'n': 'num_hues', 'sep': '(10)', 'center': '"""dark"""'}), "(260, 15, n=num_hues, sep=10, center='dark')\n", (40942, 40986), True, 'import seaborn as sns\n'), ((40999, 41115), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'main_metric', 'y': 'other_metric', 'ax': 'ax0', 'data': 'dataframe', 'hue': 'hue', 'palette': 'pal', 'style': 'style', 's': '(30)'}), '(x=main_metric, y=other_metric, ax=ax0, data=dataframe, hue=\n hue, palette=pal, style=style, s=30)\n', (41014, 41115), True, 'import seaborn as sns\n'), ((43640, 43665), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (43663, 43665), False, 'import argparse\n'), ((47813, 47850), 'os.path.join', 'os.path.join', (['args.model_dir', '"""plots"""'], {}), "(args.model_dir, 'plots')\n", (47825, 47850), False, 'import os\n'), ((3050, 3062), 'numpy.std', 'np.std', (['conf'], {}), '(conf)\n', (3056, 3062), True, 'import numpy as np\n'), ((9714, 9757), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.task_name'], {}), '(args.data_dir, args.task_name)\n', (9726, 9757), False, 'import os\n'), ((9960, 10065), 'os.path.join', 'os.path.join', (['args.filtering_output_dir', 
'f"""cartography_{args.metric}_{fraction:.2f}/{args.task_name}"""'], {}), "(args.filtering_output_dir,\n f'cartography_{args.metric}_{fraction:.2f}/{args.task_name}')\n", (9972, 10065), False, 'import os\n'), ((15682, 15705), 'os.path.exists', 'os.path.exists', (['csv_dir'], {}), '(csv_dir)\n', (15696, 15705), False, 'import os\n'), ((15715, 15735), 'os.makedirs', 'os.makedirs', (['csv_dir'], {}), '(csv_dir)\n', (15726, 15735), False, 'import os\n'), ((15766, 15812), 'os.path.join', 'os.path.join', (['csv_dir', '"""ambiguous_samples.csv"""'], {}), "(csv_dir, 'ambiguous_samples.csv')\n", (15778, 15812), False, 'import os\n'), ((16389, 16412), 'os.path.exists', 'os.path.exists', (['csv_dir'], {}), '(csv_dir)\n', (16403, 16412), False, 'import os\n'), ((16422, 16442), 'os.makedirs', 'os.makedirs', (['csv_dir'], {}), '(csv_dir)\n', (16433, 16442), False, 'import os\n'), ((19293, 19316), 'os.path.exists', 'os.path.exists', (['csv_dir'], {}), '(csv_dir)\n', (19307, 19316), False, 'import os\n'), ((19326, 19346), 'os.makedirs', 'os.makedirs', (['csv_dir'], {}), '(csv_dir)\n', (19337, 19346), False, 'import os\n'), ((27038, 27111), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""train_plots"""', 'f"""TRAIN_{ep}_epochs_ALL.gif"""'], {}), "(args.plots_dir, 'train_plots', f'TRAIN_{ep}_epochs_ALL.gif')\n", (27050, 27111), False, 'import os\n'), ((27193, 27231), 'imageio.mimsave', 'imageio.mimsave', (['gif_path', 'figs'], {'fps': '(1)'}), '(gif_path, figs, fps=1)\n', (27208, 27231), False, 'import imageio\n'), ((30301, 30368), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""eval_plots"""', 'f"""EVAL_{ep}_epochs.gif"""'], {}), "(args.plots_dir, 'eval_plots', f'EVAL_{ep}_epochs.gif')\n", (30313, 30368), False, 'import os\n'), ((30377, 30415), 'imageio.mimsave', 'imageio.mimsave', (['gif_path', 'figs'], {'fps': '(1)'}), '(gif_path, figs, fps=1)\n', (30392, 30415), False, 'import imageio\n'), ((31085, 31097), 'numpy.std', 'np.std', (['conf'], {}), '(conf)\n', 
(31091, 31097), True, 'import numpy as np\n'), ((32633, 32658), 'numpy.mean', 'np.mean', (['true_probs_trend'], {}), '(true_probs_trend)\n', (32640, 32658), True, 'import numpy as np\n'), ((34662, 34686), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (34676, 34686), False, 'import os\n'), ((34696, 34717), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (34707, 34717), False, 'import os\n'), ((37260, 37284), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (37274, 37284), False, 'import os\n'), ((37294, 37315), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (37305, 37315), False, 'import os\n'), ((40671, 40705), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (40683, 40705), True, 'import matplotlib.pyplot as plt\n'), ((40730, 40758), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 10)'}), '(figsize=(14, 10))\n', (40740, 40758), True, 'import matplotlib.pyplot as plt\n'), ((43120, 43188), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""correct."""', 'data': 'dataframe', 'ax': 'ax3', 'color': '"""#86bf91"""'}), "(x='correct.', data=dataframe, ax=ax3, color='#86bf91')\n", (43133, 43188), True, 'import seaborn as sns\n'), ((47860, 47890), 'os.path.exists', 'os.path.exists', (['args.plots_dir'], {}), '(args.plots_dir)\n', (47874, 47890), False, 'import os\n'), ((47898, 47925), 'os.makedirs', 'os.makedirs', (['args.plots_dir'], {}), '(args.plots_dir)\n', (47909, 47925), False, 'import os\n'), ((48440, 48589), 'cartography.selection.selection_utils.read_dynamics', 'read_dynamics', (['args.model_dir'], {'strip_last': "(True if args.task_name in ['QNLI'] else False)", 'burn_out': '(args.burn_out if args.burn_out < 100 else None)'}), "(args.model_dir, strip_last=True if args.task_name in ['QNLI']\n else False, burn_out=args.burn_out if args.burn_out < 100 else None)\n", (48453, 48589), False, 'from 
cartography.selection.selection_utils import read_dynamics\n'), ((50571, 50740), 'cartography.selection.selection_utils.read_dynamics', 'read_dynamics', (['args.model_dir'], {'strip_last': "(True if args.task_name in ['QNLI'] else False)", 'burn_out': '(args.burn_out if args.burn_out < 100 else None)', 'mode': '"""eval_ID"""'}), "(args.model_dir, strip_last=True if args.task_name in ['QNLI']\n else False, burn_out=args.burn_out if args.burn_out < 100 else None,\n mode='eval_ID')\n", (50584, 50740), False, 'from cartography.selection.selection_utils import read_dynamics\n'), ((51047, 51217), 'cartography.selection.selection_utils.read_dynamics', 'read_dynamics', (['args.model_dir'], {'strip_last': "(True if args.task_name in ['QNLI'] else False)", 'burn_out': '(args.burn_out if args.burn_out < 100 else None)', 'mode': '"""eval_OOD"""'}), "(args.model_dir, strip_last=True if args.task_name in ['QNLI']\n else False, burn_out=args.burn_out if args.burn_out < 100 else None,\n mode='eval_OOD')\n", (51060, 51217), False, 'from cartography.selection.selection_utils import read_dynamics\n'), ((4435, 4458), 'numpy.argmax', 'np.argmax', (['epoch_logits'], {}), '(epoch_logits)\n', (4444, 4458), True, 'import numpy as np\n'), ((9161, 9227), 'os.path.join', 'os.path.join', (['args.filtering_output_dir', 'f"""filtering_configs.json"""'], {}), "(args.filtering_output_dir, f'filtering_configs.json')\n", (9173, 9227), False, 'import os\n'), ((10099, 10121), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (10113, 10121), False, 'import os\n'), ((10129, 10148), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (10140, 10148), False, 'import os\n'), ((25506, 25575), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""train_plots"""', 'f"""TRAIN_{ep}_epochs.gif"""'], {}), "(args.plots_dir, 'train_plots', f'TRAIN_{ep}_epochs.gif')\n", (25518, 25575), False, 'import os\n'), ((25661, 25699), 'imageio.mimsave', 'imageio.mimsave', (['gif_path', 'figs'], 
{'fps': '(1)'}), '(gif_path, figs, fps=1)\n', (25676, 25699), False, 'import imageio\n'), ((26815, 26858), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""train_plots"""'], {}), "(args.plots_dir, 'train_plots')\n", (26827, 26858), False, 'import os\n'), ((30041, 30083), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""eval_plots"""'], {}), "(args.plots_dir, 'eval_plots')\n", (30053, 30083), False, 'import os\n'), ((32104, 32127), 'numpy.argmax', 'np.argmax', (['epoch_logits'], {}), '(epoch_logits)\n', (32113, 32127), True, 'import numpy as np\n'), ((49084, 49156), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jusun/adila001/RTE/train_heuristic.tsv"""'], {'sep': '"""\t|\n"""'}), "('/home/jusun/adila001/RTE/train_heuristic.tsv', sep='\\t|\\n')\n", (49095, 49156), True, 'import pandas as pd\n'), ((53115, 53185), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jusun/adila001/RTE/dev_heuristic.tsv"""'], {'sep': '"""\t|\n"""'}), "('/home/jusun/adila001/RTE/dev_heuristic.tsv', sep='\\t|\\n')\n", (53126, 53185), True, 'import pandas as pd\n'), ((53909, 53982), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jusun/adila001/WNLI/train_heuristic.tsv"""'], {'sep': '"""\t|\n"""'}), "('/home/jusun/adila001/WNLI/train_heuristic.tsv', sep='\\t|\\n')\n", (53920, 53982), True, 'import pandas as pd\n'), ((4279, 4305), 'torch.Tensor', 'torch.Tensor', (['epoch_logits'], {}), '(epoch_logits)\n', (4291, 4305), False, 'import torch\n'), ((4774, 4799), 'numpy.mean', 'np.mean', (['true_probs_trend'], {}), '(true_probs_trend)\n', (4781, 4799), True, 'import numpy as np\n'), ((9264, 9315), 'json.dumps', 'json.dumps', (['argparse_dict'], {'indent': '(4)', 'sort_keys': '(True)'}), '(argparse_dict, indent=4, sort_keys=True)\n', (9274, 9315), False, 'import json\n'), ((10254, 10297), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.task_name'], {}), '(args.data_dir, args.task_name)\n', (10266, 10297), False, 'import os\n'), ((10400, 10434), 'os.path.join', 'os.path.join', 
(['outdir', 'f"""train.tsv"""'], {}), "(outdir, f'train.tsv')\n", (10412, 10434), False, 'import os\n'), ((10719, 10748), 'pandas.concat', 'pd.concat', (['[hardest, easiest]'], {}), '([hardest, easiest])\n', (10728, 10748), True, 'import pandas as pd\n'), ((25200, 25243), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""train_plots"""'], {}), "(args.plots_dir, 'train_plots')\n", (25212, 25243), False, 'import os\n'), ((31930, 31956), 'torch.Tensor', 'torch.Tensor', (['epoch_logits'], {}), '(epoch_logits)\n', (31942, 31956), False, 'import torch\n'), ((28886, 28910), 'numpy.ceil', 'np.ceil', (['df_ood.shape[0]'], {}), '(df_ood.shape[0])\n', (28893, 28910), True, 'import numpy as np\n'), ((34107, 34130), 'torch.Tensor', 'torch.Tensor', (['logits[i]'], {}), '(logits[i])\n', (34119, 34130), False, 'import torch\n'), ((34132, 34160), 'torch.LongTensor', 'torch.LongTensor', (['targets[i]'], {}), '(targets[i])\n', (34148, 34160), False, 'import torch\n'), ((7019, 7042), 'torch.Tensor', 'torch.Tensor', (['logits[i]'], {}), '(logits[i])\n', (7031, 7042), False, 'import torch\n'), ((7044, 7072), 'torch.LongTensor', 'torch.LongTensor', (['targets[i]'], {}), '(targets[i])\n', (7060, 7072), False, 'import torch\n'), ((8027, 8050), 'torch.Tensor', 'torch.Tensor', (['logits[i]'], {}), '(logits[i])\n', (8039, 8050), False, 'import torch\n'), ((8052, 8080), 'torch.LongTensor', 'torch.LongTensor', (['targets[i]'], {}), '(targets[i])\n', (8068, 8080), False, 'import torch\n')] |
from .metric import Metric
import numpy as np
class SpatialDensity(Metric):
    '''
    Spatial density of fixations: the fraction of equal-sized screen grid
    cells that contain at least one fixation (Goldberg & Kotval, 1999).
    '''

    def __init__(self, fixation_array, cellx, celly, screen_dimension):
        super().__init__(fixation_array)
        self.cellx = cellx
        self.celly = celly
        self.screen_x, self.screen_y = screen_dimension
        # Total number of grid cells, used to normalise the visited count.
        self.num_cells = (
            screen_dimension[0] / cellx) * (screen_dimension[1] / celly)

    def get_grid(self):
        """Returns the grid after filling the cells that were visited
        (recomputed on every call).

        Returns
        -------
        numpy array
        """
        self.compute()
        return self.grid

    def compute(self):
        """Calculates the SpatialDensity as defined in
        Goldberg, J. H., & Kotval, X. P. (1999),
        dividing the screen into equal cell sizes.

        Returns
        -------
        float
            spatialDensity in [0, 1]

        Raises
        ------
        Exception
            If a fixation coordinate is non-numeric or outside the screen.
        """
        num_height = int(self.screen_y / self.celly)  # number of cells y
        num_width = int(self.screen_x / self.cellx)  # number of cells x
        # init empty grid to check visited
        self.grid = np.zeros((num_height, num_width))
        # creating array of cell edges for the width and height
        w = np.linspace(0, self.screen_x, num=num_width + 1)
        h = np.linspace(0, self.screen_y, num=num_height + 1)
        for pos, (x, y) in enumerate(self.fixation_array):
            try:
                x = float(x)
                y = float(y)
            except (TypeError, ValueError):
                # BUG FIX: the original message had no {} placeholder, so
                # .format(pos) never showed the offending position; the
                # bare `except` also masked unrelated errors.
                raise Exception(
                    "Invalid X or Y type at position {}".format(pos))
            if x > self.screen_x or x < 0:
                raise Exception('invalid X value at position {}'.format(pos))
            if y > self.screen_y or y < 0:
                raise Exception('invalid Y value at position {}'.format(pos))
            # making sure the x and y are not exactly equal to the max
            # screeny and screenx, so the edge search below always finds
            # an enclosing cell
            if x == self.screen_x:
                x = self.screen_x - 0.001
            if y == self.screen_y:
                y = self.screen_y - 0.001
            i = len(h) - 2 - np.where(h == h[h <= y][-1])[0][0]  # row index
            j = np.where(w == w[w <= x][-1])[0][0]  # column index
            self.grid[i, j] = 1
        res = np.sum(self.grid) / self.num_cells
        assert(res <= 1 and res >= 0), 'Invalid spatialDensity value'
        return res
| [
"numpy.where",
"numpy.sum",
"numpy.zeros",
"numpy.linspace"
] | [((1218, 1251), 'numpy.zeros', 'np.zeros', (['(num_height, num_width)'], {}), '((num_height, num_width))\n', (1226, 1251), True, 'import numpy as np\n'), ((1332, 1380), 'numpy.linspace', 'np.linspace', (['(0)', 'self.screen_x'], {'num': '(num_width + 1)'}), '(0, self.screen_x, num=num_width + 1)\n', (1343, 1380), True, 'import numpy as np\n'), ((1394, 1443), 'numpy.linspace', 'np.linspace', (['(0)', 'self.screen_y'], {'num': '(num_height + 1)'}), '(0, self.screen_y, num=num_height + 1)\n', (1405, 1443), True, 'import numpy as np\n'), ((2386, 2403), 'numpy.sum', 'np.sum', (['self.grid'], {}), '(self.grid)\n', (2392, 2403), True, 'import numpy as np\n'), ((2284, 2312), 'numpy.where', 'np.where', (['(w == w[w <= x][-1])'], {}), '(w == w[w <= x][-1])\n', (2292, 2312), True, 'import numpy as np\n'), ((2217, 2245), 'numpy.where', 'np.where', (['(h == h[h <= y][-1])'], {}), '(h == h[h <= y][-1])\n', (2225, 2245), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Detect CV (consonant-vowel) pair events in speaker and microphone channels.
"""
# Third party libraries
import numpy as np
import scipy.signal as sgn
from ecogvis.signal_processing.resample import resample
def detect_events(speaker_data, mic_data=None, interval=None, dfact=30,
                  smooth_width=0.4, speaker_threshold=0.05, mic_threshold=0.05,
                  direction='both'):
    """
    Automatically detects events in audio signals.

    Parameters
    ----------
    speaker_data : 'pynwb.base.TimeSeries' object
        Object containing speaker data.
    mic_data : 'pynwb.base.TimeSeries' object
        Object containing microphone data.
    interval : list of floats
        Interval to be used [Start_bin, End_bin]. If 'None', the whole
        signal is used.
    dfact : float
        Downsampling factor. Default 30.
    smooth_width: float
        Width scale for median smoothing filter (default = .4, decent for CVs).
    speaker_threshold : float
        Sets threshold level for speaker.
    mic_threshold : float
        Sets threshold level for mic.
    direction : str
        'Up' detects events start times. 'Down' detects events stop times.
        'Both' detects both start and stop times.

    Returns
    -------
    speakerDS : 1D array of floats
        Downsampled speaker signal.
    speakerEventDS : 1D array of floats
        Event times for speaker signal.
    speakerFilt : 1D array of floats
        Filtered speaker signal.
    micDS : 1D array of floats
        Downsampled microphone signal.
    micEventDS : 1D array of floats
        Event times for microphone signal.
    micFilt : 1D array of floats
        Filtered microphone signal.
    """
    # Offset (in original-rate bins) of the analysed window.
    # BUG FIX: the original dereferenced interval[0] even when interval is
    # None (documented as "whole signal"), raising a TypeError.
    start_bin = 0 if interval is None else interval[0]

    # Downsampling Speaker ---------------------------------------------------
    speakerDS, speakerEventDS, speakerFilt = None, None, None
    if speaker_data is not None:
        if interval is None:
            X = speaker_data.data[:]
        else:
            X = speaker_data.data[interval[0]:interval[1]]
        fs = speaker_data.rate  # sampling rate
        ds = fs / dfact
        # Pad zeros to make signal length a power of 2, improves performance
        nBins = X.shape[0]
        extraBins = 2 ** (np.ceil(np.log2(nBins)).astype('int')) - nBins
        X = np.append(X, np.zeros(extraBins))
        speakerDS = resample(X, ds, fs)
        # Remove excess bins (because of zero padding on previous step).
        # BUG FIX: guard against excessBins == 0, where [0:-0] would
        # wrongly produce an empty array.
        excessBins = int(np.ceil(extraBins * ds / fs))
        if excessBins > 0:
            speakerDS = speakerDS[0:-excessBins]
        # Kernel size must be an odd number
        speakerFilt = sgn.medfilt(
            volume=np.diff(np.append(speakerDS, speakerDS[-1])) ** 2,
            kernel_size=int((smooth_width * ds // 2) * 2 + 1))
        # Normalize the filtered signal.
        speakerFilt /= np.max(np.abs(speakerFilt))
        # Find threshold crossing times
        stimBinsDS = threshcross(speakerFilt, speaker_threshold, direction)
        # Remove events that have a duration less than 0.1 s.
        speaker_events = stimBinsDS.reshape((-1, 2))
        rem_ind = np.where((speaker_events[:, 1] - speaker_events[:, 0]) < ds * 0.1)[0]
        speaker_events = np.delete(speaker_events, rem_ind, axis=0)
        stimBinsDS = speaker_events.reshape((-1))
        # Transform bins to time
        speakerEventDS = (stimBinsDS / ds) + (start_bin / fs)

    # Downsampling Mic -------------------------------------------------------
    micDS, micEventDS, micFilt = None, None, None
    if mic_data is not None:
        if interval is None:
            X = mic_data.data[:]
        else:
            X = mic_data.data[interval[0]:interval[1]]
        fs = mic_data.rate  # sampling rate
        ds = fs / dfact
        # Pad zeros to make signal length a power of 2, improves performance
        nBins = X.shape[0]
        extraBins = 2 ** (np.ceil(np.log2(nBins)).astype('int')) - nBins
        X = np.append(X, np.zeros(extraBins))
        micDS = resample(X, ds, fs)
        # Remove excess bins (because of zero padding on previous step).
        excessBins = int(np.ceil(extraBins * ds / fs))
        if excessBins > 0:
            micDS = micDS[0:-excessBins]
        # Remove mic response to speaker.
        # NOTE(review): requires speaker_data to be given as well; with
        # mic-only input speakerFilt is None, as in the original.
        micDS[np.where(speakerFilt > speaker_threshold)[0]] = 0
        micFilt = sgn.medfilt(volume=np.diff(np.append(micDS, micDS[-1])) ** 2,
                              kernel_size=int(
                                  (smooth_width * ds // 2) * 2 + 1))
        # Normalize the filtered signal.
        micFilt /= np.max(np.abs(micFilt))
        # Find threshold crossing times
        micBinsDS = threshcross(micFilt, mic_threshold, direction)
        # Remove events that have a duration less than 0.1 s.
        mic_events = micBinsDS.reshape((-1, 2))
        rem_ind = np.where((mic_events[:, 1] - mic_events[:, 0]) < ds * 0.1)[0]
        mic_events = np.delete(mic_events, rem_ind, axis=0)
        micBinsDS = mic_events.reshape((-1))
        # Transform bins to time
        micEventDS = (micBinsDS / ds) + (start_bin / fs)

    return speakerDS, speakerEventDS, speakerFilt, micDS, micEventDS, micFilt
def threshcross(data, threshold=0, direction='up'):
    """
    Outputs the indices where the signal crossed the threshold.

    Parameters
    ----------
    data : array of floats
        Numpy array of floats, containing signal.
    threshold : float
        Value of threshold.
    direction : str
        Defines the direction of cross detected: 'up', 'down', or 'both'
        (case-insensitive, matching the 'Up'/'Down'/'Both' spelling used by
        detect_events). With 'both', only matched up->down pairs are kept.

    Returns
    -------
    out : array
        Array with indices where data crossed threshold.

    Raises
    ------
    ValueError
        If `direction` is not one of 'up', 'down', 'both'.
    """
    # BUG FIX: the original compared against lowercase strings only and
    # fell through with `out` unbound (UnboundLocalError) for any other
    # value, including the 'Up'/'Down' spellings from detect_events' docs.
    direction = direction.lower()
    if direction not in ('up', 'down', 'both'):
        raise ValueError(
            "direction must be 'up', 'down' or 'both', got %r" % direction)
    # Find crosses: +1 where the signal rises through the threshold,
    # -1 where it falls through it.
    over = (data >= threshold).astype('int')
    cross = np.append(False, np.diff(over))
    if direction == 'up':
        return np.where(cross == 1)[0]
    if direction == 'down':
        return np.where(cross == -1)[0]
    # direction == 'both': keep only up-crossings immediately followed by
    # a down-crossing, emitted as consecutive start/stop index pairs.
    cross_nonzero = np.where(cross != 0)[0]
    events = []
    i = 0
    while i < len(cross_nonzero) - 1:
        if (cross[cross_nonzero[i]] == 1) and (
                cross[cross_nonzero[i + 1]] == -1):
            events.append(cross_nonzero[i])
            events.append(cross_nonzero[i + 1])
            i += 2  # both indices consumed
        else:
            i += 1
    return np.array(events)
| [
"numpy.abs",
"numpy.ceil",
"ecogvis.signal_processing.resample.resample",
"numpy.where",
"numpy.delete",
"numpy.diff",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.log2"
] | [((2331, 2350), 'numpy.zeros', 'np.zeros', (['extraBins'], {}), '(extraBins)\n', (2339, 2350), True, 'import numpy as np\n'), ((2363, 2387), 'numpy.append', 'np.append', (['X', 'extraZeros'], {}), '(X, extraZeros)\n', (2372, 2387), True, 'import numpy as np\n'), ((2408, 2427), 'ecogvis.signal_processing.resample.resample', 'resample', (['X', 'ds', 'fs'], {}), '(X, ds, fs)\n', (2416, 2427), False, 'from ecogvis.signal_processing.resample import resample\n'), ((3253, 3295), 'numpy.delete', 'np.delete', (['speaker_events', 'rem_ind'], {'axis': '(0)'}), '(speaker_events, rem_ind, axis=0)\n', (3262, 3295), True, 'import numpy as np\n'), ((4005, 4024), 'numpy.zeros', 'np.zeros', (['extraBins'], {}), '(extraBins)\n', (4013, 4024), True, 'import numpy as np\n'), ((4037, 4061), 'numpy.append', 'np.append', (['X', 'extraZeros'], {}), '(X, extraZeros)\n', (4046, 4061), True, 'import numpy as np\n'), ((4078, 4097), 'ecogvis.signal_processing.resample.resample', 'resample', (['X', 'ds', 'fs'], {}), '(X, ds, fs)\n', (4086, 4097), False, 'from ecogvis.signal_processing.resample import resample\n'), ((4970, 5008), 'numpy.delete', 'np.delete', (['mic_events', 'rem_ind'], {'axis': '(0)'}), '(mic_events, rem_ind, axis=0)\n', (4979, 5008), True, 'import numpy as np\n'), ((5915, 5928), 'numpy.diff', 'np.diff', (['over'], {}), '(over)\n', (5922, 5928), True, 'import numpy as np\n'), ((2526, 2554), 'numpy.ceil', 'np.ceil', (['(extraBins * ds / fs)'], {}), '(extraBins * ds / fs)\n', (2533, 2554), True, 'import numpy as np\n'), ((2886, 2905), 'numpy.abs', 'np.abs', (['speakerFilt'], {}), '(speakerFilt)\n', (2892, 2905), True, 'import numpy as np\n'), ((3158, 3222), 'numpy.where', 'np.where', (['(speaker_events[:, 1] - speaker_events[:, 0] < ds * 0.1)'], {}), '(speaker_events[:, 1] - speaker_events[:, 0] < ds * 0.1)\n', (3166, 3222), True, 'import numpy as np\n'), ((4196, 4224), 'numpy.ceil', 'np.ceil', (['(extraBins * ds / fs)'], {}), '(extraBins * ds / fs)\n', (4203, 4224), True, 
'import numpy as np\n'), ((4633, 4648), 'numpy.abs', 'np.abs', (['micFilt'], {}), '(micFilt)\n', (4639, 4648), True, 'import numpy as np\n'), ((4887, 4943), 'numpy.where', 'np.where', (['(mic_events[:, 1] - mic_events[:, 0] < ds * 0.1)'], {}), '(mic_events[:, 1] - mic_events[:, 0] < ds * 0.1)\n', (4895, 4943), True, 'import numpy as np\n'), ((5971, 5991), 'numpy.where', 'np.where', (['(cross == 1)'], {}), '(cross == 1)\n', (5979, 5991), True, 'import numpy as np\n'), ((4319, 4360), 'numpy.where', 'np.where', (['(speakerFilt > speaker_threshold)'], {}), '(speakerFilt > speaker_threshold)\n', (4327, 4360), True, 'import numpy as np\n'), ((6039, 6060), 'numpy.where', 'np.where', (['(cross == -1)'], {}), '(cross == -1)\n', (6047, 6060), True, 'import numpy as np\n'), ((6573, 6589), 'numpy.array', 'np.array', (['events'], {}), '(events)\n', (6581, 6589), True, 'import numpy as np\n'), ((6118, 6138), 'numpy.where', 'np.where', (['(cross != 0)'], {}), '(cross != 0)\n', (6126, 6138), True, 'import numpy as np\n'), ((2708, 2743), 'numpy.append', 'np.append', (['speakerDS', 'speakerDS[-1]'], {}), '(speakerDS, speakerDS[-1])\n', (2717, 2743), True, 'import numpy as np\n'), ((4414, 4441), 'numpy.append', 'np.append', (['micDS', 'micDS[-1]'], {}), '(micDS, micDS[-1])\n', (4423, 4441), True, 'import numpy as np\n'), ((2271, 2285), 'numpy.log2', 'np.log2', (['nBins'], {}), '(nBins)\n', (2278, 2285), True, 'import numpy as np\n'), ((3945, 3959), 'numpy.log2', 'np.log2', (['nBins'], {}), '(nBins)\n', (3952, 3959), True, 'import numpy as np\n')] |
import numpy as np
import mxnet as mx
from mxnet import nd, autograd, gluon
from mxnet.gluon import nn, Block
from mxnet.gluon.loss import Loss
class InnerEncoderBlock(Block):
    """Residual convolution sub-block: LayerNorm -> Conv2D -> ReLU + skip.

    BUG FIX: gluon's ``nn.Conv2D`` requires a ``kernel_size`` argument;
    the original ``nn.Conv2D(num_filters)`` raised a TypeError at
    construction. The kernel size is now a parameter defaulting to 7
    (the convolution width used in the QANet paper), with padding chosen
    so the output keeps the input's spatial size and the residual add
    stays shape-compatible.
    """

    def __init__(self, num_filters, kernel_size=7, **kwargs):
        super(InnerEncoderBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.layer_norm = nn.LayerNorm()
            self.conv = nn.Conv2D(num_filters, kernel_size=kernel_size,
                                  padding=kernel_size // 2)

    def forward(self, x):
        x_skip = x
        x = self.layer_norm(x)
        x = self.conv(x)
        x = nd.relu(x)
        # Residual connection around the normalised conv path.
        return x_skip + x
class SelfAttetionBlock(Block):
    """Single-head scaled dot-product self-attention with a residual skip.

    (The class-name typo is kept so existing callers keep working.)

    BUG FIX: the original scaled by ``nd.sqrt(len(nd.shape(k)))`` —
    ``nd.shape`` does not exist in mxnet.ndarray — and computed
    elementwise ``q * k.T`` instead of the dot product; proper attention
    is softmax(Q K^T / sqrt(d_k)) V.
    """

    def __init__(self, **kwargs):
        super(SelfAttetionBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.layer_norm = nn.LayerNorm()

    def forward(self, x):
        x_skip = x
        x = self.layer_norm(x)
        q = x
        k = x
        v = x
        # Scale by sqrt(d_k), the key dimensionality (last axis).
        dk = k.shape[-1] ** 0.5
        scores = nd.dot(q, k, transpose_b=True) / dk
        weights = nd.softmax(scores)
        out = nd.dot(weights, v)
        # Residual connection around the attention path.
        return out + x_skip
class DenseBlock(Block):
    """Residual fully-connected sub-block: LayerNorm -> Dense, plus skip."""

    def __init__(self, num_units, **kwargs):
        super(DenseBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.layer_norm = nn.LayerNorm()
            self.dense = nn.Dense(num_units)

    def forward(self, x):
        residual = x
        out = self.dense(self.layer_norm(x))
        return out + residual
class EncoderBlock(Block):
    """One QANet-style encoder block: position encoding -> residual conv ->
    self-attention -> feed-forward, each sub-block with a skip connection."""

    def __init__(self, num_units, batch_size, sentence_size, embedding_size, **kwargs):
        super(EncoderBlock, self).__init__(**kwargs)
        self.batch_size = batch_size
        self.sentence_size = sentence_size
        self.embedding_size = embedding_size
        with self.name_scope():
            self.inner_block = InnerEncoderBlock(num_units)
            self.self_attetion_block = SelfAttetionBlock()
            self.dense_block = DenseBlock(num_units)

    def position_encoding(self, x):
        """
        Position Encoding described in section 4.1 of
        End-To-End Memory Networks (https://arxiv.org/abs/1503.08895).
        """
        encoding = np.ones((self.sentence_size, self.embedding_size), dtype=np.float32)
        # BUG FIX: the original referenced bare sentence_size /
        # embedding_size (NameError); they are instance attributes.
        ls = self.sentence_size + 1
        le = self.embedding_size + 1
        for k in range(1, le):
            for j in range(1, ls):
                encoding[j - 1, k - 1] = (1.0 - j / float(ls)) - (
                    k / float(le)) * (1. - 2. * j / float(ls))
        # BUG FIX: convert the numpy array to an NDArray before nd.tile,
        # then broadcast it across the batch dimension.
        encoding = nd.tile(nd.array(encoding), reps=(self.batch_size, 1, 1))
        return encoding + x

    def forward(self, x):
        # BUG FIX: the original called position_encoding() without self.
        x = self.position_encoding(x)
        x = self.inner_block(x)
        x = self.self_attetion_block(x)
        x = self.dense_block(x)
        return x
class ContextQueryAttention(Block):
    """Context-query attention (BiDAF-style similarity matrix).

    BUG FIX: the original declared this with ``def`` instead of ``class``,
    so instantiating it in ModelEncoder failed; it also referenced the
    undefined names ``Q`` (instead of ``q``) and ``S_2_bar`` (instead of
    the local ``S_2bar``).
    """

    def __init__(self, num_units, **kwargs):
        super(ContextQueryAttention, self).__init__(**kwargs)
        with self.name_scope():
            self.dense = nn.Dense(num_units)

    def forward(self, c, q):
        # Similarity from the trilinear features [c; q; c*q].
        x = nd.concat(c, q, c * q)
        S = self.dense(x)
        S_bar = nd.softmax(S, axis=1)   # row-wise (context-to-query)
        S_2bar = nd.softmax(S, axis=2)  # column-wise (query-to-context)
        A = S_bar * q.T
        B = S_bar * S_2bar.T * c.T
        return A, B
class ModelEncoder(Block):
    """Context-query attention followed by a stack of three encoder blocks;
    returns all three intermediate encodings for the output layers."""

    def __init__(self, num_units, batch_size, sentence_size, embedding_size, **kwargs):
        super(ModelEncoder, self).__init__(**kwargs)
        with self.name_scope():
            self.context_query_attention = ContextQueryAttention(num_units)
            self.encoder0 = EncoderBlock(num_units, batch_size, sentence_size, embedding_size)
            self.encoder1 = EncoderBlock(num_units, batch_size, sentence_size, embedding_size)
            self.encoder2 = EncoderBlock(num_units, batch_size, sentence_size, embedding_size)

    def forward(self, C, Q):
        A, B = self.context_query_attention(C, Q)
        x = nd.concat(C, A, C * A, C * B)
        enc0 = self.encoder0(x)
        enc1 = self.encoder1(enc0)
        # BUG FIX: the original called self.encoder2(enc2), using enc2
        # before assignment (NameError); the chain feeds enc1 forward.
        enc2 = self.encoder2(enc1)
        return enc0, enc1, enc2
class OutputLayer(Block):
    """Projects the concatenation of two encoder outputs to
    log-probabilities over positions."""

    def __init__(self, num_units, **kwargs):
        super(OutputLayer, self).__init__(**kwargs)
        with self.name_scope():
            self.dense = nn.Dense(num_units)
            # NOTE(review): declared but never used in forward(); kept so
            # the parameter dictionary stays identical to the original.
            self.weight = self.params.get(
                'weight', init=mx.init.Xavier(magnitude=2.24),
                shape=(num_units, num_units))

    def forward(self, enc1, enc2):
        combined = nd.concat(enc1, enc2)
        projected = self.dense(combined)
        return nd.log_softmax(projected)
class QANet(Block):
    """End-to-end QANet: encodes context and query, runs the model encoder
    stack, and emits start/end position log-probabilities."""

    def __init__(self, num_units, batch_size, sentence_size, embedding_size, **kwargs):
        super(QANet, self).__init__(**kwargs)
        with self.name_scope():
            self.context_encoder = EncoderBlock(num_units, batch_size, sentence_size, embedding_size)
            self.query_encoder = EncoderBlock(num_units, batch_size, sentence_size, embedding_size)
            self.model_encoder = ModelEncoder(num_units, batch_size, sentence_size, embedding_size)
            self.output_start = OutputLayer(num_units)
            self.output_end = OutputLayer(num_units)

    def forward(self, c, q):
        ctx_enc = self.context_encoder(c)
        qry_enc = self.query_encoder(q)
        enc0, enc1, enc2 = self.model_encoder(ctx_enc, qry_enc)
        start_logp = self.output_start(enc0, enc1)
        end_logp = self.output_end(enc0, enc2)
        return start_logp, end_logp
class LogLoss(Loss):
    """Negative mean of the summed start/end log-probabilities."""

    def __init__(self, weight=1., batch_axis=0, **kwargs):
        super(LogLoss, self).__init__(weight, batch_axis, **kwargs)

    def forward(self, p1, p2):
        total = p1 + p2
        return -nd.mean(total, axis=self._batch_axis, exclude=True)
| [
"mxnet.nd.relu",
"mxnet.nd.mean",
"numpy.ones",
"mxnet.gluon.nn.Conv2D",
"mxnet.gluon.nn.Dense",
"mxnet.nd.log_softmax",
"mxnet.init.Xavier",
"mxnet.nd.shape",
"mxnet.nd.tile",
"mxnet.gluon.nn.LayerNorm",
"mxnet.nd.softmax",
"mxnet.nd.concat"
] | [((519, 529), 'mxnet.nd.relu', 'nd.relu', (['x'], {}), '(x)\n', (526, 529), False, 'from mxnet import nd, autograd, gluon\n'), ((944, 974), 'mxnet.nd.softmax', 'nd.softmax', (['(q * k.T * 1.0 / dk)'], {}), '(q * k.T * 1.0 / dk)\n', (954, 974), False, 'from mxnet import nd, autograd, gluon\n'), ((2125, 2193), 'numpy.ones', 'np.ones', (['(self.sentence_size, self.embedding_size)'], {'dtype': 'np.float32'}), '((self.sentence_size, self.embedding_size), dtype=np.float32)\n', (2132, 2193), True, 'import numpy as np\n'), ((2464, 2511), 'mxnet.nd.tile', 'nd.tile', (['encoding'], {'reps': '(self.batch_size, 1, 1)'}), '(encoding, reps=(self.batch_size, 1, 1))\n', (2471, 2511), False, 'from mxnet import nd, autograd, gluon\n'), ((2995, 3017), 'mxnet.nd.concat', 'nd.concat', (['c', 'q', '(c * q)'], {}), '(c, q, c * q)\n', (3004, 3017), False, 'from mxnet import nd, autograd, gluon\n'), ((3058, 3079), 'mxnet.nd.softmax', 'nd.softmax', (['S'], {'axis': '(1)'}), '(S, axis=1)\n', (3068, 3079), False, 'from mxnet import nd, autograd, gluon\n'), ((3097, 3118), 'mxnet.nd.softmax', 'nd.softmax', (['S'], {'axis': '(2)'}), '(S, axis=2)\n', (3107, 3118), False, 'from mxnet import nd, autograd, gluon\n'), ((3853, 3882), 'mxnet.nd.concat', 'nd.concat', (['C', 'A', '(C * A)', '(C * B)'], {}), '(C, A, C * A, C * B)\n', (3862, 3882), False, 'from mxnet import nd, autograd, gluon\n'), ((4418, 4439), 'mxnet.nd.concat', 'nd.concat', (['enc1', 'enc2'], {}), '(enc1, enc2)\n', (4427, 4439), False, 'from mxnet import nd, autograd, gluon\n'), ((4478, 4495), 'mxnet.nd.log_softmax', 'nd.log_softmax', (['x'], {}), '(x)\n', (4492, 4495), False, 'from mxnet import nd, autograd, gluon\n'), ((344, 358), 'mxnet.gluon.nn.LayerNorm', 'nn.LayerNorm', ([], {}), '()\n', (356, 358), False, 'from mxnet.gluon import nn, Block\n'), ((383, 405), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', (['num_filters'], {}), '(num_filters)\n', (392, 405), False, 'from mxnet.gluon import nn, Block\n'), ((757, 771), 
'mxnet.gluon.nn.LayerNorm', 'nn.LayerNorm', ([], {}), '()\n', (769, 771), False, 'from mxnet.gluon import nn, Block\n'), ((1219, 1233), 'mxnet.gluon.nn.LayerNorm', 'nn.LayerNorm', ([], {}), '()\n', (1231, 1233), False, 'from mxnet.gluon import nn, Block\n'), ((1259, 1278), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['num_units'], {}), '(num_units)\n', (1267, 1278), False, 'from mxnet.gluon import nn, Block\n'), ((2934, 2953), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['num_units'], {}), '(num_units)\n', (2942, 2953), False, 'from mxnet.gluon import nn, Block\n'), ((4198, 4217), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['num_units'], {}), '(num_units)\n', (4206, 4217), False, 'from mxnet.gluon import nn, Block\n'), ((5573, 5626), 'mxnet.nd.mean', 'nd.mean', (['(p1 + p2)'], {'axis': 'self._batch_axis', 'exclude': '(True)'}), '(p1 + p2, axis=self._batch_axis, exclude=True)\n', (5580, 5626), False, 'from mxnet import nd, autograd, gluon\n'), ((916, 927), 'mxnet.nd.shape', 'nd.shape', (['k'], {}), '(k)\n', (924, 927), False, 'from mxnet import nd, autograd, gluon\n'), ((4292, 4322), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'magnitude': '(2.24)'}), '(magnitude=2.24)\n', (4306, 4322), True, 'import mxnet as mx\n')] |
def set_seeds(seed_val=42):
    '''Fix seeds for reproducibility.

    Seeds numpy, tensorflow and — new in this revision — the stdlib
    ``random`` module, which the original omitted despite promising
    reproducibility.
    '''
    import random
    random.seed(seed_val)
    from numpy.random import seed
    seed(seed_val)
    # Alias avoids shadowing the stdlib `random` imported above.
    from tensorflow import random as tf_random
    tf_random.set_seed(seed_val)
def get_zero_based_task_id(default_return=None):
    '''Fetch this process' SGE task id as a zero-based index.

    Returns ``default_return`` if the process is not run in an SGE
    environment. BUG FIX: Grid Engine sets SGE_TASK_ID to the literal
    string "undefined" for non-array jobs, which crashed the original
    int() conversion; non-numeric values now also yield the default.
    '''
    import os
    sge_id = os.environ.get('SGE_TASK_ID', None)
    if sge_id is None:
        return default_return
    try:
        return int(sge_id) - 1
    except ValueError:
        # e.g. SGE_TASK_ID == "undefined" for a non-array job
        return default_return
| [
"tensorflow.random.set_seed",
"os.environ.get",
"numpy.random.seed"
] | [((113, 127), 'numpy.random.seed', 'seed', (['seed_val'], {}), '(seed_val)\n', (117, 127), False, 'from numpy.random import seed\n'), ((166, 191), 'tensorflow.random.set_seed', 'random.set_seed', (['seed_val'], {}), '(seed_val)\n', (181, 191), False, 'from tensorflow import random\n'), ((409, 444), 'os.environ.get', 'os.environ.get', (['"""SGE_TASK_ID"""', 'None'], {}), "('SGE_TASK_ID', None)\n", (423, 444), False, 'import os\n')] |
#!/usr/bin/python3
from ctypes import *
import cv2
import numpy as np
import sys
import os
import time
from ipdb import set_trace as dbg
from enum import IntEnum
import imutils
# Select which PCN shared library variant to load below (tracking vs. plain).
tracking=False
# Directory containing the compiled libPCN_*.so libraries.
lib_dir='/home/atsg/PycharmProjects/face_recognition/FaceKit/PCN/'
class CPoint(Structure):
    # ctypes mirror of the C `CPoint` struct (a 2-D integer coordinate).
    # Field order and types must match the library ABI exactly.
    _fields_ = [("x", c_int),
                ("y", c_int)]
# Number of landmark points reported per detected face.
FEAT_POINTS = 14
class CWindow(Structure):
    # ctypes mirror of the C `CWindow` struct returned by detect_faces():
    # a rotated square face box (x, y, width, angle), its confidence score,
    # and FEAT_POINTS landmark coordinates. Field order must match the ABI.
    _fields_ = [("x", c_int),
                ("y", c_int),
                ("width", c_int),
                ("angle", c_int),
                ("score", c_float),
                ("points",CPoint*FEAT_POINTS)]
class FeatEnam(IntEnum):
    # Indices into CWindow.points for each facial landmark.
    CHIN_0 = 0
    CHIN_1 = 1
    CHIN_2 = 2
    CHIN_3 = 3
    CHIN_4 = 4
    CHIN_5 = 5
    CHIN_6 = 6
    CHIN_7 = 7
    CHIN_8 = 8
    NOSE = 9
    EYE_LEFT = 10
    EYE_RIGHT = 11
    MOUTH_LEFT = 12
    MOUTH_RIGHT = 13
    # Total landmark count (also defined as a module-level constant above).
    FEAT_POINTS = 14
# Load the tracking or non-tracking build of the PCN shared library.
if (tracking):
    lib = CDLL(os.path.join(lib_dir,'libPCN_tracking.so'))
else:
    lib = CDLL(os.path.join(lib_dir,'libPCN_no_tracking.so'))

# Bind the C entry points and declare their signatures so ctypes marshals
# arguments and return values correctly.
init_detector = lib.init_detector
#void *init_detector(const char *detection_model_path,
#		const char *pcn1_proto, const char *pcn2_proto, const char *pcn3_proto,
#		const char *tracking_model_path, const char *tracking_proto,
#		int min_face_size, float pyramid_scale_factor, float detection_thresh_stage1,
#		float detection_thresh_stage2, float detection_thresh_stage3, int tracking_period,
#		float tracking_thresh, int do_smooth)
init_detector.argtypes = [
    c_char_p, c_char_p, c_char_p,
    c_char_p, c_char_p, c_char_p,
    c_int,c_float,c_float,c_float,
    c_float,c_int,c_float,c_int]
init_detector.restype = c_void_p

#CWindow* detect_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)
detect_faces = lib.detect_faces
detect_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]
detect_faces.restype = POINTER(CWindow)

#void free_faces(CWindow* wins)
free_faces = lib.free_faces
free_faces.argtypes= [c_void_p]

# void free_detector(void *pcn)
free_detector = lib.free_detector
free_detector.argtypes= [c_void_p]

# Drawing colours in OpenCV BGR order.
CYAN=(255,255,0)
BLUE=(255,0,0)
RED=(0,0,255)
GREEN=(0,255,0)
YELLOW=(0,255,255)
def get_list_dir_in_folder(dir):
    """Return the names of the immediate sub-directories of *dir*."""
    subdirs = []
    for entry in os.listdir(dir):
        if os.path.isdir(os.path.join(dir, entry)):
            subdirs.append(entry)
    return subdirs
def get_list_file_in_folder(dir, ext='jpg'):
    """Return the entry names in *dir* whose names end with *ext*."""
    return [name for name in os.listdir(dir) if name.endswith(ext)]
def DrawFace(win, img):
    """Draw win's rotated square face box on img: cyan outline, blue top edge."""
    thickness = 2
    left, top = win.x, win.y
    right = win.width + win.x - 1
    bottom = win.width + win.y - 1
    cx = (left + right) / 2
    cy = (top + bottom) / 2
    rot = cv2.getRotationMatrix2D((cx, cy), win.angle, 1)
    corners = np.array([[left, top, 1],
                        [left, bottom, 1],
                        [right, bottom, 1],
                        [right, top, 1]], np.int32)
    corners = (corners @ rot.T).astype(int)  # rotate the box corners
    corners = corners.reshape((-1, 1, 2))
    cv2.polylines(img, [corners], True, CYAN, thickness)
    cv2.line(img, (corners[0][0][0], corners[0][0][1]),
             (corners[3][0][0], corners[3][0][1]), BLUE, thickness)
def DrawPoints(win, img):
    """Draw landmarks on img: nose green, eyes yellow, mouth red, chin blue."""
    radius = 3
    colored = ((FeatEnam.NOSE, GREEN),
               (FeatEnam.EYE_LEFT, YELLOW),
               (FeatEnam.EYE_RIGHT, YELLOW),
               (FeatEnam.MOUTH_LEFT, RED),
               (FeatEnam.MOUTH_RIGHT, RED))
    for feat, color in colored:
        p = win.points[feat]
        cv2.circle(img, (p.x, p.y), radius, color, -1)
    # First eight chin points, drawn in blue.
    for i in range(8):
        p = win.points[i]
        cv2.circle(img, (p.x, p.y), radius, BLUE, -1)
def SetThreadCount(threads):
    """Cap the number of OpenMP worker threads used by the native library."""
    os.environ.update({'OMP_NUM_THREADS': str(threads)})
def c_str(str_in):
    """Encode a Python string as a UTF-8 C char pointer (ctypes c_char_p)."""
    encoded = str_in.encode('utf-8')
    return c_char_p(encoded)
def initialize():
    """Create and configure a PCN face detector.

    Model and prototxt files are loaded from /usr/local/share/pcn/.
    Returns the native detector handle; release it with free_detector().
    """
    SetThreadCount(1)
    path = '/usr/local/share/pcn/'
    detection_model_path = c_str(path + "PCN.caffemodel")
    pcn1_proto = c_str(path + "PCN-1.prototxt")
    pcn2_proto = c_str(path + "PCN-2.prototxt")
    pcn3_proto = c_str(path + "PCN-3.prototxt")
    tracking_model_path = c_str(path + "PCN-Tracking.caffemodel")
    tracking_proto = c_str(path + "PCN-Tracking.prototxt")
    min_face_size=40 # minimum face size to detect >20
    image_pyramid_scale_factor=1.45 # scaling factor of image pyramid [1.4;1.6]
    score_thres = (0.5, 0.5, 0.98) # score threshold of detected faces [0;1]
    smooth=0 #Smooth the face boxes or not (smooth = true or false, recommend using it on video to get stabler face boxes)
    tracking_period=30  # frames between full detections while tracking
    tracking_thres=0.9  # confidence threshold for keeping a tracked face
    detector = init_detector(detection_model_path,
                             pcn1_proto, pcn2_proto, pcn3_proto,
                             tracking_model_path, tracking_proto,
                             min_face_size,
                             image_pyramid_scale_factor,
                             score_thres[0], score_thres[1], score_thres[2],
                             tracking_period, tracking_thres,
                             smooth)
    return detector
def detect_dir(detector, input_dir, rotate=0):
    """Run face detection over every jpg image in *input_dir*.

    Each image is first rotated by *rotate* degrees.  Half-size annotated
    results are written to ``<input_dir>_result_<rotate>``; images in which
    no face was found go to its ``NG`` sub-folder.  A detection-rate summary
    is printed at the end.

    Fixes vs. original: guards the accuracy division against an empty input
    folder (ZeroDivisionError) and drops a dead function-local
    ``import shutil`` plus the commented-out copy call that used it.
    """
    print()
    print('Begin test ', input_dir, 'with angle', rotate)
    output_dir = input_dir + '_result_' + str(rotate)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    outputNG_dir = output_dir + '/NG'
    if not os.path.exists(outputNG_dir):
        os.makedirs(outputNG_dir)
    list_file = sorted(get_list_file_in_folder(input_dir))
    total = 0
    detected = 0
    for img_file in list_file:
        frame = cv2.imread(os.path.join(input_dir, img_file))
        frame = imutils.rotate_bound(frame, rotate)
        width = frame.shape[1]
        height = frame.shape[0]
        face_count = c_int(0)
        # Hand the raw pixel buffer to the native detector.
        raw_data = frame.ctypes.data_as(POINTER(c_ubyte))
        windows = detect_faces(detector, raw_data, int(height), int(width), pointer(face_count))
        num_face = face_count.value
        for i in range(num_face):
            DrawFace(windows[i], frame)
            DrawPoints(windows[i], frame)
        free_faces(windows)
        total += 1
        # Half-size copy for the on-disk result images.
        resized = cv2.resize(frame, (int(width/2), int(height/2)), interpolation=cv2.INTER_CUBIC)
        if (num_face < 1):
            print('NG:', img_file, '---------------------------------')
            cv2.imwrite(os.path.join(outputNG_dir, img_file), resized)
        else:
            cv2.imwrite(os.path.join(output_dir, img_file), resized)
            detected += 1
    # Guard against an empty folder so the summary never divides by zero.
    accuracy = float(detected) / float(total) if total else 0.0
    print(rotate, ', Detected:', detected, ', Total:', total, ', Accuracy:', accuracy)
def detect_cam(detector):
    """Run face detection on a video stream and display annotated frames.

    Uses the file named by the first CLI argument when present, otherwise
    the default camera (index 0).  Press 'q' in the display window to quit.

    Fixes vs. original: ``cap.read()`` returns ``(False, None)`` at end of
    stream, so the old ``frame.shape`` test raised AttributeError; the fps
    computation also divided by zero when a frame took less than one timer
    tick.
    """
    if len(sys.argv)==2:
        cap = cv2.VideoCapture(sys.argv[1])
    else:
        cap = cv2.VideoCapture(0)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    while cap.isOpened():
        ret, frame = cap.read()
        # Stop on a failed read (end of file / camera gone) or an empty frame.
        if not ret or frame is None or frame.shape[0] == 0:
            break
        start = time.time()
        face_count = c_int(0)
        raw_data = frame.ctypes.data_as(POINTER(c_ubyte))
        windows = detect_faces(detector, raw_data,
                               int(height), int(width),
                               pointer(face_count))
        end = time.time()
        for i in range(face_count.value):
            DrawFace(windows[i], frame)
            DrawPoints(windows[i], frame)
        free_faces(windows)
        # Avoid ZeroDivisionError on very fast frames.
        elapsed = end - start
        fps = int(1 / elapsed) if elapsed > 0 else 0
        cv2.putText(frame, str(fps) + "fps", (20, 45), 4, 1, (0, 0, 125))
        cv2.imshow('PCN', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
if __name__=="__main__":
    # Build the detector once, then sweep rotation angles 0..315 in 45-degree
    # steps over the same input folder.
    detector = initialize()
    #detect_cam(detector)
    input_dir = '/home/atsg/PycharmProjects/face_recognition/tiepnh/OK'
    for i in range(0,360,45):
        detect_dir(detector, input_dir, rotate=i)
    # Release the native detector handle.
    free_detector(detector)
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"cv2.polylines",
"cv2.line",
"os.path.join",
"cv2.imshow",
"numpy.array",
"cv2.circle",
"cv2.waitKey",
"imutils.rotate_bound",
"cv2.VideoCapture",
"cv2.getRotationMatrix2D",
"time.time"
] | [((2824, 2877), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(centerX, centerY)', 'angle', '(1)'], {}), '((centerX, centerY), angle, 1)\n', (2847, 2877), False, 'import cv2\n'), ((2885, 2957), 'numpy.array', 'np.array', (['[[x1, y1, 1], [x1, y2, 1], [x2, y2, 1], [x2, y1, 1]]', 'np.int32'], {}), '([[x1, y1, 1], [x1, y2, 1], [x2, y2, 1], [x2, y1, 1]], np.int32)\n', (2893, 2957), True, 'import numpy as np\n'), ((3032, 3076), 'cv2.polylines', 'cv2.polylines', (['img', '[pts]', '(True)', 'CYAN', 'width'], {}), '(img, [pts], True, CYAN, width)\n', (3045, 3076), False, 'import cv2\n'), ((3077, 3167), 'cv2.line', 'cv2.line', (['img', '(pts[0][0][0], pts[0][0][1])', '(pts[3][0][0], pts[3][0][1])', 'BLUE', 'width'], {}), '(img, (pts[0][0][0], pts[0][0][1]), (pts[3][0][0], pts[3][0][1]),\n BLUE, width)\n', (3085, 3167), False, 'import cv2\n'), ((3230, 3299), 'cv2.circle', 'cv2.circle', (['img', '(win.points[f].x, win.points[f].y)', 'width', 'GREEN', '(-1)'], {}), '(img, (win.points[f].x, win.points[f].y), width, GREEN, -1)\n', (3240, 3299), False, 'import cv2\n'), ((3325, 3395), 'cv2.circle', 'cv2.circle', (['img', '(win.points[f].x, win.points[f].y)', 'width', 'YELLOW', '(-1)'], {}), '(img, (win.points[f].x, win.points[f].y), width, YELLOW, -1)\n', (3335, 3395), False, 'import cv2\n'), ((3422, 3492), 'cv2.circle', 'cv2.circle', (['img', '(win.points[f].x, win.points[f].y)', 'width', 'YELLOW', '(-1)'], {}), '(img, (win.points[f].x, win.points[f].y), width, YELLOW, -1)\n', (3432, 3492), False, 'import cv2\n'), ((3520, 3587), 'cv2.circle', 'cv2.circle', (['img', '(win.points[f].x, win.points[f].y)', 'width', 'RED', '(-1)'], {}), '(img, (win.points[f].x, win.points[f].y), width, RED, -1)\n', (3530, 3587), False, 'import cv2\n'), ((3616, 3683), 'cv2.circle', 'cv2.circle', (['img', '(win.points[f].x, win.points[f].y)', 'width', 'RED', '(-1)'], {}), '(img, (win.points[f].x, win.points[f].y), width, RED, -1)\n', (3626, 3683), False, 'import cv2\n'), ((905, 948), 
'os.path.join', 'os.path.join', (['lib_dir', '"""libPCN_tracking.so"""'], {}), "(lib_dir, 'libPCN_tracking.so')\n", (917, 948), False, 'import os\n'), ((970, 1016), 'os.path.join', 'os.path.join', (['lib_dir', '"""libPCN_no_tracking.so"""'], {}), "(lib_dir, 'libPCN_no_tracking.so')\n", (982, 1016), False, 'import os\n'), ((3710, 3778), 'cv2.circle', 'cv2.circle', (['img', '(win.points[i].x, win.points[i].y)', 'width', 'BLUE', '(-1)'], {}), '(img, (win.points[i].x, win.points[i].y), width, BLUE, -1)\n', (3720, 3778), False, 'import cv2\n'), ((5363, 5389), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (5377, 5389), False, 'import os\n'), ((5399, 5422), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (5410, 5422), False, 'import os\n'), ((5473, 5501), 'os.path.exists', 'os.path.exists', (['outputNG_dir'], {}), '(outputNG_dir)\n', (5487, 5501), False, 'import os\n'), ((5511, 5536), 'os.makedirs', 'os.makedirs', (['outputNG_dir'], {}), '(outputNG_dir)\n', (5522, 5536), False, 'import os\n'), ((5764, 5799), 'imutils.rotate_bound', 'imutils.rotate_bound', (['frame', 'rotate'], {}), '(frame, rotate)\n', (5784, 5799), False, 'import imutils\n'), ((7091, 7120), 'cv2.VideoCapture', 'cv2.VideoCapture', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (7107, 7120), False, 'import cv2\n'), ((7145, 7164), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (7161, 7164), False, 'import cv2\n'), ((7420, 7431), 'time.time', 'time.time', ([], {}), '()\n', (7429, 7431), False, 'import time\n'), ((7694, 7705), 'time.time', 'time.time', ([], {}), '()\n', (7703, 7705), False, 'import time\n'), ((7977, 8001), 'cv2.imshow', 'cv2.imshow', (['"""PCN"""', 'frame'], {}), "('PCN', frame)\n", (7987, 8001), False, 'import cv2\n'), ((2310, 2325), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (2320, 2325), False, 'import os\n'), ((2495, 2510), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (2505, 2510), False, 'import os\n'), 
((5713, 5746), 'os.path.join', 'os.path.join', (['input_dir', 'img_file'], {}), '(input_dir, img_file)\n', (5725, 5746), False, 'import os\n'), ((2343, 2363), 'os.path.join', 'os.path.join', (['dir', 'o'], {}), '(dir, o)\n', (2355, 2363), False, 'import os\n'), ((6472, 6508), 'os.path.join', 'os.path.join', (['outputNG_dir', 'img_file'], {}), '(outputNG_dir, img_file)\n', (6484, 6508), False, 'import os\n'), ((6593, 6627), 'os.path.join', 'os.path.join', (['output_dir', 'img_file'], {}), '(output_dir, img_file)\n', (6605, 6627), False, 'import os\n'), ((8013, 8027), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8024, 8027), False, 'import cv2\n')] |
# Pseudocolor any grayscale image
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import fatal_error
def pseudocolor(gray_img, obj=None, mask=None, cmap=None, background="image", min_value=0, max_value=255, dpi=None,
                axes=True, colorbar=True):
    """Pseudocolor any grayscale image to custom colormap

    Inputs:
    gray_img    = grayscale image data
    obj         = if provided, the pseudocolored image gets cropped down to the region of interest
    mask        = binary mask
    cmap        = colormap
    background  = background color/type, options are "image" (gray_img), "white", or "black"
                  (a mask must be supplied)
    min_value   = minimum value for range of interest
    max_value   = maximum value for range of interest
    dpi         = dots per inch, (optional, if dpi=None then the matplotlib default is used, 100 dpi)
    axes        = if False then x- and y-axis won't be displayed, nor will the title
    colorbar    = if False then colorbar won't be displayed

    Returns:
    pseudo_image = pseudocolored image

    :param gray_img: numpy.ndarray
    :param obj: numpy.ndarray
    :param mask: numpy.ndarray
    :param cmap: str
    :param background: str
    :param min_value: numeric
    :param max_value: numeric
    :param dpi: int
    :param axes: bool
    :param colorbar: bool
    :return pseudo_image: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1

    # Make copies of the gray image
    gray_img1 = np.copy(gray_img)

    # Check if the image is grayscale
    if len(np.shape(gray_img)) != 2:
        fatal_error("Image must be grayscale.")

    # Apply the mask if given
    if mask is not None:
        if obj is not None:
            # Copy the image
            img_copy = np.copy(gray_img1)
            # Extract contour size
            x, y, w, h = cv2.boundingRect(obj)
            cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)
            # Crop down the image
            crop_img = gray_img[y:y + h, x:x + w]
            # Calculate the buffer size based on the contour size
            offsetx = int(w / 5)
            offsety = int(h / 5)
            if background.upper() == "IMAGE":
                gray_img1 = gray_img1[y - offsety:y + h + offsety, x - offsetx:x + w + offsetx]
            else:
                # Crop img including buffer
                gray_img1 = cv2.copyMakeBorder(crop_img, offsety, offsety, offsetx, offsetx, cv2.BORDER_CONSTANT,
                                               value=(0, 0, 0))
            # Crop the mask to the same size as the image
            crop_mask = mask[y:y + h, x:x + w]
            mask = cv2.copyMakeBorder(crop_mask, offsety, offsety, offsetx, offsetx, cv2.BORDER_CONSTANT,
                                      value=(0, 0, 0))

        # Apply the mask.  FIX: the np.bool alias was removed in NumPy 1.24;
        # the builtin bool is the supported, equivalent dtype argument.
        masked_img = np.ma.array(gray_img1, mask=~mask.astype(bool))

        # Set the background color or type
        if background.upper() == "BLACK":
            # Background is all zeros
            bkg_img = np.zeros(np.shape(gray_img1), dtype=np.uint8)
            # Use the gray cmap for the background
            bkg_cmap = "gray"
        elif background.upper() == "WHITE":
            # Background is all 255 (white)
            bkg_img = np.zeros(np.shape(gray_img1), dtype=np.uint8)
            bkg_img += 255
            bkg_cmap = "gray"
        elif background.upper() == "IMAGE":
            # Set the background to the input gray image
            bkg_img = gray_img1
            bkg_cmap = "gray"
        else:
            fatal_error(
                "Background type {0} is not supported. Please use 'white', 'black', or 'image'.".format(background))

        # Pseudocolor the image, plot the background first
        pseudo_img1 = plt.imshow(bkg_img, cmap=bkg_cmap, vmin=min_value, vmax=max_value)
        # Overlay the masked grayscale image with the user input colormap
        plt.imshow(masked_img, cmap=cmap, vmin=min_value, vmax=max_value)

        if colorbar:
            plt.colorbar(fraction=0.033, pad=0.04)

        if axes:
            # Include image title
            plt.title('Pseudocolored image')
        else:
            # Remove axes
            plt.xticks([])
            plt.yticks([])

        # Store the current figure
        pseudo_img = plt.gcf()

        # Print or plot if debug is turned on
        # NOTE(review): unlike the no-mask branch below, this branch does not
        # call pseudo_img.clear() before closing — kept as in the original.
        if params.debug == 'print':
            plt.savefig(os.path.join(params.debug_outdir, str(params.device) + '_pseudocolored.png'), dpi=dpi)
            plt.close()
        elif params.debug == 'plot':
            plot_image(pseudo_img1)
            # Use non-blocking mode in case the function is run more than once
            plt.show(block=False)
        elif params.debug is None:
            plt.show(block=False)

    else:
        # Pseudocolor the image
        pseudo_img1 = plt.imshow(gray_img1, cmap=cmap, vmin=min_value, vmax=max_value)

        if colorbar:
            # Include the colorbar
            plt.colorbar(fraction=0.033, pad=0.04)

        if axes:
            # Include image title
            plt.title('Pseudocolored image')  # + os.path.splitext(filename)[0])
        else:
            # Remove axes
            plt.xticks([])
            plt.yticks([])

        pseudo_img = plt.gcf()

        # Print or plot if debug is turned on
        if params.debug == 'print':
            plt.savefig(os.path.join(params.debug_outdir, str(params.device) + '_pseudocolored.png'), dpi=dpi)
            pseudo_img.clear()
            plt.close()
        elif params.debug == 'plot':
            plot_image(pseudo_img1)
            # Use non-blocking mode in case the function is run more than once
            plt.show(block=False)
        elif params.debug is None:
            plt.show(block=False)

    return pseudo_img
| [
"matplotlib.pyplot.imshow",
"numpy.copy",
"cv2.rectangle",
"plantcv.plantcv.fatal_error",
"matplotlib.pyplot.xticks",
"plantcv.plantcv.plot_image",
"matplotlib.pyplot.gcf",
"cv2.copyMakeBorder",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"matplotlib.pypl... | [((1610, 1627), 'numpy.copy', 'np.copy', (['gray_img'], {}), '(gray_img)\n', (1617, 1627), True, 'import numpy as np\n'), ((1712, 1751), 'plantcv.plantcv.fatal_error', 'fatal_error', (['"""Image must be grayscale."""'], {}), "('Image must be grayscale.')\n", (1723, 1751), False, 'from plantcv.plantcv import fatal_error\n'), ((3918, 3984), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bkg_img'], {'cmap': 'bkg_cmap', 'vmin': 'min_value', 'vmax': 'max_value'}), '(bkg_img, cmap=bkg_cmap, vmin=min_value, vmax=max_value)\n', (3928, 3984), True, 'from matplotlib import pyplot as plt\n'), ((4067, 4132), 'matplotlib.pyplot.imshow', 'plt.imshow', (['masked_img'], {'cmap': 'cmap', 'vmin': 'min_value', 'vmax': 'max_value'}), '(masked_img, cmap=cmap, vmin=min_value, vmax=max_value)\n', (4077, 4132), True, 'from matplotlib import pyplot as plt\n'), ((4454, 4463), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4461, 4463), True, 'from matplotlib import pyplot as plt\n'), ((5002, 5066), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gray_img1'], {'cmap': 'cmap', 'vmin': 'min_value', 'vmax': 'max_value'}), '(gray_img1, cmap=cmap, vmin=min_value, vmax=max_value)\n', (5012, 5066), True, 'from matplotlib import pyplot as plt\n'), ((5424, 5433), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5431, 5433), True, 'from matplotlib import pyplot as plt\n'), ((1678, 1696), 'numpy.shape', 'np.shape', (['gray_img'], {}), '(gray_img)\n', (1686, 1696), True, 'import numpy as np\n'), ((1888, 1906), 'numpy.copy', 'np.copy', (['gray_img1'], {}), '(gray_img1)\n', (1895, 1906), True, 'import numpy as np\n'), ((1967, 1988), 'cv2.boundingRect', 'cv2.boundingRect', (['obj'], {}), '(obj)\n', (1983, 1988), False, 'import cv2\n'), ((2001, 2064), 'cv2.rectangle', 'cv2.rectangle', (['img_copy', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(5)'], {}), '(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)\n', (2014, 2064), False, 'import cv2\n'), ((2791, 2899), 
'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['crop_mask', 'offsety', 'offsety', 'offsetx', 'offsetx', 'cv2.BORDER_CONSTANT'], {'value': '(0, 0, 0)'}), '(crop_mask, offsety, offsety, offsetx, offsetx, cv2.\n BORDER_CONSTANT, value=(0, 0, 0))\n', (2809, 2899), False, 'import cv2\n'), ((4167, 4205), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.033)', 'pad': '(0.04)'}), '(fraction=0.033, pad=0.04)\n', (4179, 4205), True, 'from matplotlib import pyplot as plt\n'), ((4270, 4302), 'matplotlib.pyplot.title', 'plt.title', (['"""Pseudocolored image"""'], {}), "('Pseudocolored image')\n", (4279, 4302), True, 'from matplotlib import pyplot as plt\n'), ((4355, 4369), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4365, 4369), True, 'from matplotlib import pyplot as plt\n'), ((4382, 4396), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4392, 4396), True, 'from matplotlib import pyplot as plt\n'), ((4670, 4681), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4679, 4681), True, 'from matplotlib import pyplot as plt\n'), ((5136, 5174), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.033)', 'pad': '(0.04)'}), '(fraction=0.033, pad=0.04)\n', (5148, 5174), True, 'from matplotlib import pyplot as plt\n'), ((5239, 5271), 'matplotlib.pyplot.title', 'plt.title', (['"""Pseudocolored image"""'], {}), "('Pseudocolored image')\n", (5248, 5271), True, 'from matplotlib import pyplot as plt\n'), ((5360, 5374), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5370, 5374), True, 'from matplotlib import pyplot as plt\n'), ((5387, 5401), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5397, 5401), True, 'from matplotlib import pyplot as plt\n'), ((5671, 5682), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5680, 5682), True, 'from matplotlib import pyplot as plt\n'), ((2516, 2623), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['crop_img', 'offsety', 
'offsety', 'offsetx', 'offsetx', 'cv2.BORDER_CONSTANT'], {'value': '(0, 0, 0)'}), '(crop_img, offsety, offsety, offsetx, offsetx, cv2.\n BORDER_CONSTANT, value=(0, 0, 0))\n', (2534, 2623), False, 'import cv2\n'), ((3186, 3205), 'numpy.shape', 'np.shape', (['gray_img1'], {}), '(gray_img1)\n', (3194, 3205), True, 'import numpy as np\n'), ((4731, 4754), 'plantcv.plantcv.plot_image', 'plot_image', (['pseudo_img1'], {}), '(pseudo_img1)\n', (4741, 4754), False, 'from plantcv.plantcv import plot_image\n'), ((4846, 4867), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4854, 4867), True, 'from matplotlib import pyplot as plt\n'), ((5732, 5755), 'plantcv.plantcv.plot_image', 'plot_image', (['pseudo_img1'], {}), '(pseudo_img1)\n', (5742, 5755), False, 'from plantcv.plantcv import plot_image\n'), ((5847, 5868), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5855, 5868), True, 'from matplotlib import pyplot as plt\n'), ((3423, 3442), 'numpy.shape', 'np.shape', (['gray_img1'], {}), '(gray_img1)\n', (3431, 3442), True, 'import numpy as np\n'), ((4915, 4936), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4923, 4936), True, 'from matplotlib import pyplot as plt\n'), ((5916, 5937), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5924, 5937), True, 'from matplotlib import pyplot as plt\n')] |
# Plot a Beta prior and its conjugate posterior after observing Bernoulli data.
import numpy as np
import matplotlib
# Non-interactive backend so the script runs without a display.
matplotlib.use('PDF')
import matplotlib.pyplot as plt
from scipy.stats import beta as Beta
# Figure index used in the output file name.
i=9
# Number of Bernoulli samples to draw.
n=10
# Beta prior hyper-parameters.
alpha=5
beta=5
# Draw n samples in {0, 1} with P(1) = 0.7.
samples=np.random.choice(2, n, replace=True, p=[0.3,0.7])
# k = number of observed successes (ones).
k=len([y for y in samples if y==1])
#x-axis values
x=np.linspace(0,1, 100)
#r'$\alpha=1, \beta$=1'
plt.title("alpha="+str(alpha)+", β="+str(beta)+", n="+str(n))
#prior
plt.plot(x,Beta.pdf(x,alpha,beta),'b-')
#posterior
# Conjugate update: Beta(alpha + k, beta + n - k).
plt.plot(x,Beta.pdf(x,alpha+k,beta+(n-k)),'r-')
plt.xlabel('θ')
plt.ylabel('f(θ)')
#plt.show()
plt.savefig('C://Users//Tristan_local//Desktop//Figure_'+str(i)+'.png', format='png',alpha='0')
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"numpy.random.choice",
"matplotlib.pyplot.xlabel",
"numpy.linspace",
"scipy.stats.beta.pdf"
] | [((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""PDF"""'], {}), "('PDF')\n", (51, 58), False, 'import matplotlib\n'), ((164, 214), 'numpy.random.choice', 'np.random.choice', (['(2)', 'n'], {'replace': '(True)', 'p': '[0.3, 0.7]'}), '(2, n, replace=True, p=[0.3, 0.7])\n', (180, 214), True, 'import numpy as np\n'), ((268, 290), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (279, 290), True, 'import numpy as np\n'), ((484, 499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""θ"""'], {}), "('θ')\n", (494, 499), True, 'import matplotlib.pyplot as plt\n'), ((508, 526), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f(θ)"""'], {}), "('f(θ)')\n", (518, 526), True, 'import matplotlib.pyplot as plt\n'), ((396, 420), 'scipy.stats.beta.pdf', 'Beta.pdf', (['x', 'alpha', 'beta'], {}), '(x, alpha, beta)\n', (404, 420), True, 'from scipy.stats import beta as Beta\n'), ((447, 485), 'scipy.stats.beta.pdf', 'Beta.pdf', (['x', '(alpha + k)', '(beta + (n - k))'], {}), '(x, alpha + k, beta + (n - k))\n', (455, 485), True, 'from scipy.stats import beta as Beta\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the wind_direction.WindDirection plugin."""
import unittest
import numpy as np
from cf_units import Unit
from iris.coords import DimCoord
from iris.cube import Cube
from iris.tests import IrisTest
from improver.wind_calculations.wind_direction import WindDirection
from ...set_up_test_cubes import set_up_variable_cube
# Data to test complex/degree handling functions.
# Complex angles equivalent to np.arange(0., 360, 10) degrees.
# Each entry is the unit vector cos(theta) + i*sin(theta), theta in 10-degree steps.
COMPLEX_ANGLES = np.array(
    [
        1.0 + 0j,
        0.984807753 + 0.173648178j,
        0.939692621 + 0.342020143j,
        0.866025404 + 0.5j,
        0.766044443 + 0.642787610j,
        0.642787610 + 0.766044443j,
        0.5 + 0.866025404j,
        0.342020143 + 0.939692621j,
        0.173648178 + 0.984807753j,
        0.0 + 1.0j,
        -0.173648178 + 0.984807753j,
        -0.342020143 + 0.939692621j,
        -0.5 + 0.866025404j,
        -0.642787610 + 0.766044443j,
        -0.766044443 + 0.642787610j,
        -0.866025404 + 0.5j,
        -0.939692621 + 0.342020143j,
        -0.984807753 + 0.173648178j,
        -1.0 + 0.0j,
        -0.984807753 - 0.173648178j,
        -0.939692621 - 0.342020143j,
        -0.866025404 - 0.5j,
        -0.766044443 - 0.642787610j,
        -0.642787610 - 0.766044443j,
        -0.5 - 0.866025404j,
        -0.342020143 - 0.939692621j,
        -0.173648178 - 0.984807753j,
        -0.0 - 1.0j,
        0.173648178 - 0.984807753j,
        0.342020143 - 0.939692621j,
        0.5 - 0.866025404j,
        0.642787610 - 0.766044443j,
        0.766044443 - 0.642787610j,
        0.866025404 - 0.5j,
        0.939692621 - 0.342020143j,
        0.984807753 - 0.173648178j,
    ]
)

# Data to test the ensemble averaging codes.
# Complex wind directions laid out as (realization, y, x) = (2, 2, 2).
WIND_DIR_COMPLEX = np.array(
    [
        [
            [6.12323400e-17 + 1.0j, 0.642787610 + 0.76604444j],
            [-1.83697020e-16 - 1.0j, 0.984807753 - 0.17364818j],
        ],
        [
            [-1.83697020e-16 - 1.0j, 0.5 + 0.8660254j],
            [0.342020143 - 0.93969262j, 0.984807753 + 0.17364818j],
        ],
    ]
)
def make_wdir_cube_534():
    """Make a 5x3x4 wind direction cube for testing this plugin"""
    # Five realizations of a 3x4 field of wind directions in degrees.
    data = np.array(
        [
            [
                [170.0, 50.0, 90.0, 90.0],
                [170.0, 170.0, 47.0, 350.0],
                [10.0, 309.0, 10.0, 10.0],
            ],
            [
                [170.0, 50.0, 90.0, 90.0],
                [170.0, 170.0, 47.0, 47.0],
                [10.0, 10.0, 10.0, 10.0],
            ],
            [
                [10.0, 50.0, 90.0, 90.0],
                [170.0, 170.0, 47.0, 47.0],
                [310.0, 309.0, 10.0, 10.0],
            ],
            [
                [190.0, 40.0, 270.0, 90.0],
                [170.0, 170.0, 47.0, 47.0],
                [310.0, 309.0, 10.0, 10.0],
            ],
            [
                [190.0, 40.0, 270.0, 270.0],
                [170.0, 170.0, 47.0, 47.0],
                [310.0, 309.0, 10.0, 10.0],
            ],
        ],
        dtype=np.float32,
    )
    cube = set_up_variable_cube(
        data, name="wind_from_direction", units="degrees", spatial_grid="equalarea"
    )
    return cube
def make_wdir_cube_222():
    """Build a small 2x2x2 wind-direction cube (degrees) for the tests."""
    angles = np.array(
        [[[90.0, 50.0], [270.0, 350.0]], [[270.0, 60.0], [290.0, 10.0]]],
        dtype=np.float32,
    )
    return set_up_variable_cube(
        angles, name="wind_from_direction", units="degrees", spatial_grid="equalarea"
    )
def pad_wdir_cube_222():
    """Build the make_wdir_cube_222 data zero-padded out to 2x10x10.

    Original data: 2x2x2; padded data 2x10x10, with the coordinate points
    reset to a regular 2 km grid.
    """
    core = np.array(
        [[[90.0, 50.0], [270.0, 350.0]], [[270.0, 60.0], [290.0, 10.0]]],
        dtype=np.float32,
    )
    # Surround each 2x2 field with a 4-cell border of zeros -> 10x10.
    padded = np.pad(
        core, ((0, 0), (4, 4), (4, 4)), "constant", constant_values=(0.0, 0.0)
    )
    cube = set_up_variable_cube(
        padded.astype(np.float32),
        name="wind_from_direction",
        units="degrees",
        spatial_grid="equalarea",
    )
    cube.coord(axis="x").points = np.arange(-50000.0, -31000.0, 2000.0)
    cube.coord(axis="y").points = np.arange(0.0, 19000.0, 2000.0)
    return cube
class Test__init__(IrisTest):
    """Test the init method."""

    def test_basic(self):
        """Default construction yields a WindDirection instance."""
        plugin = WindDirection()
        self.assertIsInstance(plugin, WindDirection)

    def test_backup_method(self):
        """The backup_method keyword is accepted at construction."""
        plugin = WindDirection(backup_method="neighbourhood")
        self.assertIsInstance(plugin, WindDirection)

    def test_invalid_method(self):
        """An unrecognised backup_method option raises ValueError."""
        msg = "Invalid option for keyword backup_method "
        with self.assertRaisesRegex(ValueError, msg):
            WindDirection(backup_method="invalid")
class Test__repr__(IrisTest):
    """Test the repr method."""

    def test_basic(self):
        """__repr__ reports the backup method and neighbourhood radius."""
        expected = (
            '<WindDirection: backup_method "neighbourhood"; neighbourhood '
            'radius "6000.0"m>'
        )
        self.assertEqual(str(WindDirection()), expected)
# Test the complex number handling functions.
class Test_deg_to_complex(IrisTest):
    """Test the deg_to_complex function."""
    def test_converts_single(self):
        """Tests that degree angle value is converted to complex."""
        # 45 degrees -> cos(45) + i*sin(45) = sqrt(2)/2 * (1 + 1j).
        expected_out = 0.707106781187 + 0.707106781187j
        result = WindDirection().deg_to_complex(45.0)
        self.assertAlmostEqual(result, expected_out)
    def test_handles_angle_wrap(self):
        """Test that code correctly handles 360 and 0 degrees."""
        # Both ends of the circle must map to the same unit vector 1 + 0j.
        expected_out = 1 + 0j
        result = WindDirection().deg_to_complex(0)
        self.assertAlmostEqual(result, expected_out)
        expected_out = 1 - 0j
        result = WindDirection().deg_to_complex(360)
        self.assertAlmostEqual(result, expected_out)
    def test_converts_array(self):
        """Tests that array of floats is converted to complex array."""
        # 0..350 in 10-degree steps corresponds exactly to COMPLEX_ANGLES.
        result = WindDirection().deg_to_complex(np.arange(0.0, 360, 10))
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, COMPLEX_ANGLES)
class Test_complex_to_deg(IrisTest):
    """Test the complex_to_deg function."""
    def test_fails_if_data_is_not_array(self):
        """Test code raises a Type Error if input data not an array."""
        input_data = 0 - 1j
        msg = "Input data is not a numpy array, but" " {}".format(type(input_data))
        with self.assertRaisesRegex(TypeError, msg):
            WindDirection().complex_to_deg(input_data)
    def test_handles_angle_wrap(self):
        """Test that code correctly handles 360 and 0 degrees."""
        # Input is complex for 0 and 360 deg - both should return 0.0.
        input_data = np.array([1 + 0j, 1 - 0j])
        result = WindDirection().complex_to_deg(input_data)
        self.assertTrue((result == 0.0).all())
    def test_converts_array(self):
        """Tests that array of complex values are converted to degrees."""
        # COMPLEX_ANGLES covers 0..350 degrees in 10-degree steps.
        result = WindDirection().complex_to_deg(COMPLEX_ANGLES)
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, np.arange(0.0, 360, 10))
class Test_complex_to_deg_roundtrip(IrisTest):
    """Test the complex_to_deg and deg_to_complex functions together."""

    def setUp(self):
        """Create a plugin instance and a test cube."""
        self.plugin = WindDirection()
        self.cube = make_wdir_cube_534()

    def test_from_deg(self):
        """Degree values survive a round trip through complex space."""
        as_complex = self.plugin.deg_to_complex(self.cube.data)
        round_tripped = self.plugin.complex_to_deg(as_complex)
        self.assertArrayAlmostEqual(round_tripped, self.cube.data, decimal=4)

    def test_from_complex(self):
        """Complex values survive a round trip through degrees."""
        as_degrees = self.plugin.complex_to_deg(COMPLEX_ANGLES)
        round_tripped = self.plugin.deg_to_complex(as_degrees)
        self.assertArrayAlmostEqual(round_tripped, COMPLEX_ANGLES)
class Test_calc_wind_dir_mean(IrisTest):
    """Test the calc_wind_dir_mean function."""
    def setUp(self):
        """Initialise plugin and supply data for tests"""
        self.plugin = WindDirection()
        # 5x3x4 3D Array containing wind direction in angles.
        cube = make_wdir_cube_534()
        self.plugin.wdir_complex = self.plugin.deg_to_complex(cube.data)
        self.plugin.wdir_slice_mean = next(cube.slices_over("realization"))
        self.plugin.realization_axis = 0
        # Expected circular mean over the realization axis, in degrees.
        self.expected_wind_mean = np.array(
            [
                [176.636276, 46.002445, 90.0, 90.0],
                [170.0, 170.0, 47.0, 36.544231],
                [333.413239, 320.035217, 10.0, 10.0],
            ],
            dtype=np.float32,
        )
    def test_complex(self):
        """Test that the function defines correct complex mean."""
        self.plugin.calc_wind_dir_mean()
        result = self.plugin.wdir_mean_complex
        # Rebuild the expected complex values at the same magnitude (radius)
        # as the computed mean so only the angles are compared.
        expected_complex = self.plugin.deg_to_complex(
            self.expected_wind_mean, radius=np.absolute(result)
        )
        self.assertArrayAlmostEqual(result, expected_complex)
    def test_degrees(self):
        """Test that the function defines correct degrees cube."""
        self.plugin.calc_wind_dir_mean()
        result = self.plugin.wdir_slice_mean
        self.assertIsInstance(result, Cube)
        self.assertIsInstance(result.data, np.ndarray)
        self.assertArrayAlmostEqual(result.data, self.expected_wind_mean, decimal=4)
class Test_find_r_values(IrisTest):
    """Test the find_r_values function."""
    def setUp(self):
        """Initialise plugin and supply data for tests"""
        self.plugin = WindDirection()
    def test_converts_single(self):
        """Tests that r-value is correctly extracted from complex value."""
        # Attach a cube for the plugin to copy in creating the resulting cube:
        self.plugin.wdir_slice_mean = make_wdir_cube_222()[0][0][0]
        expected_out = 2.0
        # Set-up complex values for angle=45 and r=2
        self.plugin.wdir_mean_complex = 1.4142135624 + 1.4142135624j
        self.plugin.find_r_values()
        self.assertAlmostEqual(self.plugin.r_vals_slice.data, expected_out)
    def test_converts_array(self):
        """Test that code can find r-values from array of complex numbers."""
        longitude = DimCoord(
            np.linspace(-180, 180, 36), standard_name="longitude", units="degrees"
        )
        cube = Cube(
            COMPLEX_ANGLES,
            standard_name="wind_from_direction",
            dim_coords_and_dims=[(longitude, 0)],
            units="degree",
        )
        # Attach a cube for the plugin to copy in creating the resulting cube:
        self.plugin.wdir_slice_mean = cube
        self.plugin.wdir_mean_complex = COMPLEX_ANGLES
        # COMPLEX_ANGLES are unit vectors, so every r-value should be 1.
        expected_out = np.ones(COMPLEX_ANGLES.shape, dtype=np.float32)
        self.plugin.find_r_values()
        result = self.plugin.r_vals_slice.data
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_out)
class Test_calc_confidence_measure(IrisTest):
    """Test the calc_avg_dist_mean function returns confidence values."""
    def setUp(self):
        """Initialise plugin and supply data for tests"""
        self.plugin = WindDirection()
        self.plugin.wdir_complex = WIND_DIR_COMPLEX
        self.plugin.realization_axis = 0
        # r-values (magnitude of the mean complex vector) per grid point;
        # the first element is ~0 because its members point in opposite directions.
        rvals = np.array(
            [[6.12323400e-17, 0.996194698], [0.984807753, 0.984807753]],
            dtype=np.float32,
        )
        self.plugin.r_vals_slice = set_up_variable_cube(
            rvals, name="r_values", units="1", spatial_grid="equalarea"
        )
        wdir = np.array([[180.0, 55.0], [280.0, 0.0]], dtype=np.float32)
        self.plugin.wdir_slice_mean = set_up_variable_cube(
            wdir, name="wind_from_direction", units="degrees", spatial_grid="equalarea"
        )
    def test_returns_confidence(self):
        """First element has two angles directly opposite (90 & 270 degs).
        Therefore the calculated mean angle of 180 degs is basically
        meaningless. This code calculates a confidence measure based on how
        far the individual ensemble realizationss are away from
        the mean point."""
        expected_out = np.array([[0.0, 0.95638061], [0.91284426, 0.91284426]])
        self.plugin.calc_confidence_measure()
        result = self.plugin.confidence_slice.data
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_out)
class Test_wind_dir_decider(IrisTest):
    """Test the wind_dir_decider function."""
    def test_runs_function_1st_member(self):
        """First element has two angles directly opposite (90 & 270 degs).
        Therefore the calculated mean angle of 180 degs is basically
        meaningless with an r value of nearly zero. So the code substitutes the
        wind direction taken from the first ensemble value in its place."""
        cube = make_wdir_cube_222()
        self.plugin = WindDirection(backup_method="first_realization")
        self.plugin.wdir_complex = WIND_DIR_COMPLEX
        self.plugin.realization_axis = 0
        # Mean directions: the first point (180 deg) is the meaningless mean
        # of two opposing realizations.
        self.plugin.wdir_slice_mean = cube[0].copy(
            data=np.array([[180.0, 55.0], [280.0, 0.0]])
        )
        self.plugin.wdir_mean_complex = self.plugin.deg_to_complex(
            self.plugin.wdir_slice_mean.data
        )
        # The flagged point should be replaced by the first realization's
        # value (90 deg); all other points are left untouched.
        expected_out = np.array([[90.0, 55.0], [280.0, 0.0]])
        # Mask marking the single low-confidence point to be substituted.
        where_low_r = np.array([[True, False], [False, False]])
        self.plugin.wind_dir_decider(where_low_r, cube)
        result = self.plugin.wdir_slice_mean.data
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected_out)
    def test_runs_function_nbhood(self):
        """First element has two angles directly opposite (90 & 270 degs).
        Therefore the calculated mean angle of 180 degs is basically
        meaningless with an r value of nearly zero. So the code substitutes the
        wind direction taken using the neighbourhood method."""
        expected_out = np.array([[354.91, 55.0], [280.0, 0.0]])
        cube = pad_wdir_cube_222()
        # Pad the low-confidence mask to match the padded cube shape; the
        # padded border cells are also flagged True.
        where_low_r = np.pad(
            np.array([[True, False], [False, False]]),
            ((4, 4), (4, 4)),
            "constant",
            constant_values=(True, True),
        )
        wind_dir_deg_mean = np.array([[180.0, 55.0], [280.0, 0.0]])
        self.plugin = WindDirection(backup_method="neighbourhood")
        self.plugin.realization_axis = 0
        self.plugin.n_realizations = 1
        # Zero-pad the complex mean so the neighbourhood method has
        # surrounding points to operate on.
        self.plugin.wdir_mean_complex = np.pad(
            self.plugin.deg_to_complex(wind_dir_deg_mean),
            ((4, 4), (4, 4)),
            "constant",
            constant_values=(0.0, 0.0),
        )
        # Zero-pad the per-realization complex data in the same way (the
        # leading axis is the realization axis and is not padded).
        self.plugin.wdir_complex = np.pad(
            WIND_DIR_COMPLEX,
            ((0, 0), (4, 4), (4, 4)),
            "constant",
            constant_values=(0.0 + 0.0j),
        )
        self.plugin.wdir_slice_mean = cube[0].copy(
            data=np.pad(
                wind_dir_deg_mean, ((4, 4), (4, 4)), "constant", constant_values=0.0
            )
        )
        self.plugin.wind_dir_decider(where_low_r, cube)
        result = self.plugin.wdir_slice_mean.data
        self.assertIsInstance(result, np.ndarray)
        # Only the unpadded interior [4:6, 4:6] is compared against the
        # expected values.
        self.assertArrayAlmostEqual(result[4:6, 4:6], expected_out, decimal=2)
class Test_process(IrisTest):
    """Test entire code handles a cube correctly."""
    def setUp(self):
        """Create a cube with collapsable coordinates."""
        self.cube = make_wdir_cube_534()
        # Expected circular-mean wind directions (degrees) for the fixture.
        self.expected_wind_mean = np.array(
            [
                [176.63627625, 46.00244522, 90.0, 90.0],
                [170.0, 170.0, 47.0, 36.54423141],
                [333.41320801, 320.03521729, 10.0, 10.0],
            ],
            dtype=np.float32,
        )
        # Expected vector-mean magnitudes (r-values); 1.0 means all
        # realizations agree on the direction.
        self.expected_r_vals = np.array(
            [
                [0.5919044, 0.99634719, 0.2, 0.6],
                [1.0, 1.0, 1.0, 0.92427504],
                [0.87177974, 0.91385943, 1.0, 1.0],
            ],
            dtype=np.float32,
        )
        # Expected confidence measure derived from realization spread.
        self.expected_confidence_measure = np.array(
            [
                [0.73166388, 0.95813018, 0.6, 0.8],
                [1.0, 1.0, 1.0, 0.84808648],
                [0.75270665, 0.83861077, 1.0, 1.0],
            ],
            dtype=np.float32,
        )
    def test_basic(self):
        """Test that the plugin returns expected data types. """
        result_cube, r_vals_cube, confidence_measure_cube = WindDirection().process(
            self.cube
        )
        self.assertIsInstance(result_cube, Cube)
        self.assertIsInstance(r_vals_cube, Cube)
        self.assertIsInstance(confidence_measure_cube, Cube)
    def test_fails_if_data_is_not_cube(self):
        """Test code raises a Type Error if input cube is not a cube."""
        input_data = 50.0
        msg = "Wind direction input is not a cube, but" " {0}".format(type(input_data))
        with self.assertRaisesRegex(TypeError, msg):
            WindDirection().process(input_data)
    def test_fails_if_data_is_not_convertible_to_degrees(self):
        """Test code raises a ValueError if input cube is not convertible to
        degrees."""
        # Kelvin temperatures cannot be unit-converted to degrees of angle.
        data = np.array([[300.0, 270.0], [270.0, 300.0]], dtype=np.float32)
        cube = set_up_variable_cube(data, name="air_temperature", units="K")
        msg = "Input cube cannot be converted to degrees"
        with self.assertRaisesRegex(ValueError, msg):
            WindDirection().process(cube)
    def test_return_single_precision(self):
        """Test that the function returns data of float32."""
        result_cube, r_vals_cube, confidence_measure_cube = WindDirection().process(
            self.cube
        )
        self.assertEqual(result_cube.dtype, np.float32)
        self.assertEqual(r_vals_cube.dtype, np.float32)
        self.assertEqual(confidence_measure_cube.dtype, np.float32)
    def test_returns_expected_values(self):
        """Test that the function returns correct 2D arrays of floats. """
        result_cube, r_vals_cube, confidence_measure_cube = WindDirection().process(
            self.cube
        )
        result = result_cube.data
        r_vals = r_vals_cube.data
        confidence_measure = confidence_measure_cube.data
        self.assertIsInstance(result, np.ndarray)
        self.assertIsInstance(r_vals, np.ndarray)
        self.assertIsInstance(confidence_measure, np.ndarray)
        self.assertArrayAlmostEqual(result, self.expected_wind_mean, decimal=4)
        self.assertArrayAlmostEqual(r_vals, self.expected_r_vals)
        self.assertArrayAlmostEqual(
            confidence_measure, self.expected_confidence_measure
        )
    def test_with_backup(self):
        """Test that wind_dir_decider is invoked to select a better value for
        a low-confidence point."""
        # create a low-confidence point: realizations evenly spread around
        # the circle, so the vector mean is near zero
        self.cube.data[:, 1, 1] = [0.0, 72.0, 144.0, 216.0, 288.0]
        # set up a larger cube using a "neutral" pad value so that
        # neighbourhood processing does not fail
        data = np.full((5, 10, 10), 30.0, dtype=np.float32)
        data[:, 3:6, 3:7] = self.cube.data[:, :, :].copy()
        cube = set_up_variable_cube(
            data, name="wind_from_direction", units="degrees", spatial_grid="equalarea"
        )
        # Give the padded cube regular 2 km grid spacing on both axes.
        cube.coord(axis="x").points = np.arange(-50000.0, -31000.0, 2000.0)
        cube.coord(axis="y").points = np.arange(0.0, 19000.0, 2000.0)
        # Update the expectations at the low-confidence point only.
        self.expected_wind_mean[1, 1] = 30.0870
        self.expected_r_vals[1, 1] = 2.665601e-08
        self.expected_confidence_measure[1, 1] = 0.0
        result_cube, r_vals_cube, confidence_measure_cube = WindDirection().process(
            cube
        )
        # Compare only the unpadded interior region of the outputs.
        result = result_cube.data[3:6, 3:7]
        r_vals = r_vals_cube.data[3:6, 3:7]
        confidence_measure = confidence_measure_cube.data[3:6, 3:7]
        self.assertIsInstance(result, np.ndarray)
        self.assertIsInstance(r_vals, np.ndarray)
        self.assertIsInstance(confidence_measure, np.ndarray)
        self.assertArrayAlmostEqual(result, self.expected_wind_mean, decimal=4)
        self.assertArrayAlmostEqual(
            confidence_measure, self.expected_confidence_measure
        )
        self.assertArrayAlmostEqual(r_vals, self.expected_r_vals)
# Run this module's tests when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.ones",
"numpy.absolute",
"improver.wind_calculations.wind_direction.WindDirection",
"numpy.array",
"numpy.linspace",
"unittest.main",
"numpy.full",
"numpy.pad",
"iris.cube.Cube",
"numpy.arange"
] | [((2134, 3096), 'numpy.array', 'np.array', (['[1.0 + 0.0j, 0.984807753 + 0.173648178j, 0.939692621 + 0.342020143j, \n 0.866025404 + 0.5j, 0.766044443 + 0.64278761j, 0.64278761 + \n 0.766044443j, 0.5 + 0.866025404j, 0.342020143 + 0.939692621j, \n 0.173648178 + 0.984807753j, 0.0 + 1.0j, -0.173648178 + 0.984807753j, -\n 0.342020143 + 0.939692621j, -0.5 + 0.866025404j, -0.64278761 + \n 0.766044443j, -0.766044443 + 0.64278761j, -0.866025404 + 0.5j, -\n 0.939692621 + 0.342020143j, -0.984807753 + 0.173648178j, -1.0 + 0.0j, -\n 0.984807753 - 0.173648178j, -0.939692621 - 0.342020143j, -0.866025404 -\n 0.5j, -0.766044443 - 0.64278761j, -0.64278761 - 0.766044443j, -0.5 - \n 0.866025404j, -0.342020143 - 0.939692621j, -0.173648178 - 0.984807753j,\n -0.0 - 1.0j, 0.173648178 - 0.984807753j, 0.342020143 - 0.939692621j, \n 0.5 - 0.866025404j, 0.64278761 - 0.766044443j, 0.766044443 - \n 0.64278761j, 0.866025404 - 0.5j, 0.939692621 - 0.342020143j, \n 0.984807753 - 0.173648178j]'], {}), '([1.0 + 0.0j, 0.984807753 + 0.173648178j, 0.939692621 + \n 0.342020143j, 0.866025404 + 0.5j, 0.766044443 + 0.64278761j, 0.64278761 +\n 0.766044443j, 0.5 + 0.866025404j, 0.342020143 + 0.939692621j, \n 0.173648178 + 0.984807753j, 0.0 + 1.0j, -0.173648178 + 0.984807753j, -\n 0.342020143 + 0.939692621j, -0.5 + 0.866025404j, -0.64278761 + \n 0.766044443j, -0.766044443 + 0.64278761j, -0.866025404 + 0.5j, -\n 0.939692621 + 0.342020143j, -0.984807753 + 0.173648178j, -1.0 + 0.0j, -\n 0.984807753 - 0.173648178j, -0.939692621 - 0.342020143j, -0.866025404 -\n 0.5j, -0.766044443 - 0.64278761j, -0.64278761 - 0.766044443j, -0.5 - \n 0.866025404j, -0.342020143 - 0.939692621j, -0.173648178 - 0.984807753j,\n -0.0 - 1.0j, 0.173648178 - 0.984807753j, 0.342020143 - 0.939692621j, \n 0.5 - 0.866025404j, 0.64278761 - 0.766044443j, 0.766044443 - \n 0.64278761j, 0.866025404 - 0.5j, 0.939692621 - 0.342020143j, \n 0.984807753 - 0.173648178j])\n', (2142, 3096), True, 'import numpy as np\n'), ((3407, 3630), 'numpy.array', 
'np.array', (['[[[6.123234e-17 + 1.0j, 0.64278761 + 0.76604444j], [-1.8369702e-16 - 1.0j, \n 0.984807753 - 0.17364818j]], [[-1.8369702e-16 - 1.0j, 0.5 + 0.8660254j],\n [0.342020143 - 0.93969262j, 0.984807753 + 0.17364818j]]]'], {}), '([[[6.123234e-17 + 1.0j, 0.64278761 + 0.76604444j], [-1.8369702e-16 -\n 1.0j, 0.984807753 - 0.17364818j]], [[-1.8369702e-16 - 1.0j, 0.5 + \n 0.8660254j], [0.342020143 - 0.93969262j, 0.984807753 + 0.17364818j]]])\n', (3415, 3630), True, 'import numpy as np\n'), ((3832, 4314), 'numpy.array', 'np.array', (['[[[170.0, 50.0, 90.0, 90.0], [170.0, 170.0, 47.0, 350.0], [10.0, 309.0, \n 10.0, 10.0]], [[170.0, 50.0, 90.0, 90.0], [170.0, 170.0, 47.0, 47.0], [\n 10.0, 10.0, 10.0, 10.0]], [[10.0, 50.0, 90.0, 90.0], [170.0, 170.0, \n 47.0, 47.0], [310.0, 309.0, 10.0, 10.0]], [[190.0, 40.0, 270.0, 90.0],\n [170.0, 170.0, 47.0, 47.0], [310.0, 309.0, 10.0, 10.0]], [[190.0, 40.0,\n 270.0, 270.0], [170.0, 170.0, 47.0, 47.0], [310.0, 309.0, 10.0, 10.0]]]'], {'dtype': 'np.float32'}), '([[[170.0, 50.0, 90.0, 90.0], [170.0, 170.0, 47.0, 350.0], [10.0, \n 309.0, 10.0, 10.0]], [[170.0, 50.0, 90.0, 90.0], [170.0, 170.0, 47.0, \n 47.0], [10.0, 10.0, 10.0, 10.0]], [[10.0, 50.0, 90.0, 90.0], [170.0, \n 170.0, 47.0, 47.0], [310.0, 309.0, 10.0, 10.0]], [[190.0, 40.0, 270.0, \n 90.0], [170.0, 170.0, 47.0, 47.0], [310.0, 309.0, 10.0, 10.0]], [[190.0,\n 40.0, 270.0, 270.0], [170.0, 170.0, 47.0, 47.0], [310.0, 309.0, 10.0, \n 10.0]]], dtype=np.float32)\n', (3840, 4314), True, 'import numpy as np\n'), ((4942, 5038), 'numpy.array', 'np.array', (['[[[90.0, 50.0], [270.0, 350.0]], [[270.0, 60.0], [290.0, 10.0]]]'], {'dtype': 'np.float32'}), '([[[90.0, 50.0], [270.0, 350.0]], [[270.0, 60.0], [290.0, 10.0]]],\n dtype=np.float32)\n', (4950, 5038), True, 'import numpy as np\n'), ((5366, 5462), 'numpy.array', 'np.array', (['[[[90.0, 50.0], [270.0, 350.0]], [[270.0, 60.0], [290.0, 10.0]]]'], {'dtype': 'np.float32'}), '([[[90.0, 50.0], [270.0, 350.0]], [[270.0, 60.0], [290.0, 
10.0]]],\n dtype=np.float32)\n', (5374, 5462), True, 'import numpy as np\n'), ((5500, 5578), 'numpy.pad', 'np.pad', (['data', '((0, 0), (4, 4), (4, 4))', '"""constant"""'], {'constant_values': '(0.0, 0.0)'}), "(data, ((0, 0), (4, 4), (4, 4)), 'constant', constant_values=(0.0, 0.0))\n", (5506, 5578), True, 'import numpy as np\n'), ((5801, 5838), 'numpy.arange', 'np.arange', (['(-50000.0)', '(-31000.0)', '(2000.0)'], {}), '(-50000.0, -31000.0, 2000.0)\n', (5810, 5838), True, 'import numpy as np\n'), ((5873, 5904), 'numpy.arange', 'np.arange', (['(0.0)', '(19000.0)', '(2000.0)'], {}), '(0.0, 19000.0, 2000.0)\n', (5882, 5904), True, 'import numpy as np\n'), ((22464, 22479), 'unittest.main', 'unittest.main', ([], {}), '()\n', (22477, 22479), False, 'import unittest\n'), ((6082, 6097), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (6095, 6097), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((6262, 6306), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {'backup_method': '"""neighbourhood"""'}), "(backup_method='neighbourhood')\n", (6275, 6306), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((8693, 8723), 'numpy.array', 'np.array', (['[1 + 0.0j, 1 - 0.0j]'], {}), '([1 + 0.0j, 1 - 0.0j])\n', (8701, 8723), True, 'import numpy as np\n'), ((9345, 9360), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (9358, 9360), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((10186, 10201), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (10199, 10201), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((10525, 10666), 'numpy.array', 'np.array', (['[[176.636276, 46.002445, 90.0, 90.0], [170.0, 170.0, 47.0, 36.544231], [\n 333.413239, 320.035217, 10.0, 10.0]]'], {'dtype': 'np.float32'}), 
'([[176.636276, 46.002445, 90.0, 90.0], [170.0, 170.0, 47.0, \n 36.544231], [333.413239, 320.035217, 10.0, 10.0]], dtype=np.float32)\n', (10533, 10666), True, 'import numpy as np\n'), ((11684, 11699), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (11697, 11699), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((12474, 12589), 'iris.cube.Cube', 'Cube', (['COMPLEX_ANGLES'], {'standard_name': '"""wind_from_direction"""', 'dim_coords_and_dims': '[(longitude, 0)]', 'units': '"""degree"""'}), "(COMPLEX_ANGLES, standard_name='wind_from_direction',\n dim_coords_and_dims=[(longitude, 0)], units='degree')\n", (12478, 12589), False, 'from iris.cube import Cube\n'), ((12845, 12892), 'numpy.ones', 'np.ones', (['COMPLEX_ANGLES.shape'], {'dtype': 'np.float32'}), '(COMPLEX_ANGLES.shape, dtype=np.float32)\n', (12852, 12892), True, 'import numpy as np\n'), ((13308, 13323), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (13321, 13323), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((13433, 13523), 'numpy.array', 'np.array', (['[[6.123234e-17, 0.996194698], [0.984807753, 0.984807753]]'], {'dtype': 'np.float32'}), '([[6.123234e-17, 0.996194698], [0.984807753, 0.984807753]], dtype=\n np.float32)\n', (13441, 13523), True, 'import numpy as np\n'), ((13710, 13767), 'numpy.array', 'np.array', (['[[180.0, 55.0], [280.0, 0.0]]'], {'dtype': 'np.float32'}), '([[180.0, 55.0], [280.0, 0.0]], dtype=np.float32)\n', (13718, 13767), True, 'import numpy as np\n'), ((14300, 14355), 'numpy.array', 'np.array', (['[[0.0, 0.95638061], [0.91284426, 0.91284426]]'], {}), '([[0.0, 0.95638061], [0.91284426, 0.91284426]])\n', (14308, 14355), True, 'import numpy as np\n'), ((15053, 15101), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {'backup_method': '"""first_realization"""'}), 
"(backup_method='first_realization')\n", (15066, 15101), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((15460, 15498), 'numpy.array', 'np.array', (['[[90.0, 55.0], [280.0, 0.0]]'], {}), '([[90.0, 55.0], [280.0, 0.0]])\n', (15468, 15498), True, 'import numpy as np\n'), ((15521, 15562), 'numpy.array', 'np.array', (['[[True, False], [False, False]]'], {}), '([[True, False], [False, False]])\n', (15529, 15562), True, 'import numpy as np\n'), ((16131, 16171), 'numpy.array', 'np.array', (['[[354.91, 55.0], [280.0, 0.0]]'], {}), '([[354.91, 55.0], [280.0, 0.0]])\n', (16139, 16171), True, 'import numpy as np\n'), ((16428, 16467), 'numpy.array', 'np.array', (['[[180.0, 55.0], [280.0, 0.0]]'], {}), '([[180.0, 55.0], [280.0, 0.0]])\n', (16436, 16467), True, 'import numpy as np\n'), ((16491, 16535), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {'backup_method': '"""neighbourhood"""'}), "(backup_method='neighbourhood')\n", (16504, 16535), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((16862, 16956), 'numpy.pad', 'np.pad', (['WIND_DIR_COMPLEX', '((0, 0), (4, 4), (4, 4))', '"""constant"""'], {'constant_values': '(0.0 + 0.0j)'}), "(WIND_DIR_COMPLEX, ((0, 0), (4, 4), (4, 4)), 'constant',\n constant_values=0.0 + 0.0j)\n", (16868, 16956), True, 'import numpy as np\n'), ((17676, 17827), 'numpy.array', 'np.array', (['[[176.63627625, 46.00244522, 90.0, 90.0], [170.0, 170.0, 47.0, 36.54423141],\n [333.41320801, 320.03521729, 10.0, 10.0]]'], {'dtype': 'np.float32'}), '([[176.63627625, 46.00244522, 90.0, 90.0], [170.0, 170.0, 47.0, \n 36.54423141], [333.41320801, 320.03521729, 10.0, 10.0]], dtype=np.float32)\n', (17684, 17827), True, 'import numpy as np\n'), ((17953, 18086), 'numpy.array', 'np.array', (['[[0.5919044, 0.99634719, 0.2, 0.6], [1.0, 1.0, 1.0, 0.92427504], [\n 0.87177974, 0.91385943, 1.0, 1.0]]'], {'dtype': 'np.float32'}), '([[0.5919044, 0.99634719, 0.2, 0.6], [1.0, 1.0, 
1.0, 0.92427504], [\n 0.87177974, 0.91385943, 1.0, 1.0]], dtype=np.float32)\n', (17961, 18086), True, 'import numpy as np\n'), ((18224, 18357), 'numpy.array', 'np.array', (['[[0.73166388, 0.95813018, 0.6, 0.8], [1.0, 1.0, 1.0, 0.84808648], [\n 0.75270665, 0.83861077, 1.0, 1.0]]'], {'dtype': 'np.float32'}), '([[0.73166388, 0.95813018, 0.6, 0.8], [1.0, 1.0, 1.0, 0.84808648],\n [0.75270665, 0.83861077, 1.0, 1.0]], dtype=np.float32)\n', (18232, 18357), True, 'import numpy as np\n'), ((19333, 19393), 'numpy.array', 'np.array', (['[[300.0, 270.0], [270.0, 300.0]]'], {'dtype': 'np.float32'}), '([[300.0, 270.0], [270.0, 300.0]], dtype=np.float32)\n', (19341, 19393), True, 'import numpy as np\n'), ((21203, 21247), 'numpy.full', 'np.full', (['(5, 10, 10)', '(30.0)'], {'dtype': 'np.float32'}), '((5, 10, 10), 30.0, dtype=np.float32)\n', (21210, 21247), True, 'import numpy as np\n'), ((21481, 21518), 'numpy.arange', 'np.arange', (['(-50000.0)', '(-31000.0)', '(2000.0)'], {}), '(-50000.0, -31000.0, 2000.0)\n', (21490, 21518), True, 'import numpy as np\n'), ((21557, 21588), 'numpy.arange', 'np.arange', (['(0.0)', '(19000.0)', '(2000.0)'], {}), '(0.0, 19000.0, 2000.0)\n', (21566, 21588), True, 'import numpy as np\n'), ((6600, 6638), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {'backup_method': '"""invalid"""'}), "(backup_method='invalid')\n", (6613, 6638), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((6817, 6832), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (6830, 6832), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((7937, 7960), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(10)'], {}), '(0.0, 360, 10)\n', (7946, 7960), True, 'import numpy as np\n'), ((9096, 9119), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(10)'], {}), '(0.0, 360, 10)\n', (9105, 9119), True, 'import numpy as np\n'), ((12377, 12403), 
'numpy.linspace', 'np.linspace', (['(-180)', '(180)', '(36)'], {}), '(-180, 180, 36)\n', (12388, 12403), True, 'import numpy as np\n'), ((16250, 16291), 'numpy.array', 'np.array', (['[[True, False], [False, False]]'], {}), '([[True, False], [False, False]])\n', (16258, 16291), True, 'import numpy as np\n'), ((7314, 7329), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (7327, 7329), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((7557, 7572), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (7570, 7572), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((7692, 7707), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (7705, 7707), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((7906, 7921), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (7919, 7921), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((8737, 8752), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (8750, 8752), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((8955, 8970), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (8968, 8970), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((11043, 11062), 'numpy.absolute', 'np.absolute', (['result'], {}), '(result)\n', (11054, 11062), True, 'import numpy as np\n'), ((15264, 15303), 'numpy.array', 'np.array', (['[[180.0, 55.0], [280.0, 0.0]]'], {}), '([[180.0, 55.0], [280.0, 0.0]])\n', (15272, 15303), True, 'import numpy as np\n'), ((17083, 17159), 'numpy.pad', 'np.pad', (['wind_dir_deg_mean', '((4, 4), (4, 4))', '"""constant"""'], {'constant_values': '(0.0)'}), "(wind_dir_deg_mean, ((4, 4), (4, 
4)), 'constant', constant_values=0.0)\n", (17089, 17159), True, 'import numpy as np\n'), ((18604, 18619), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (18617, 18619), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((19794, 19809), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (19807, 19809), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((20213, 20228), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (20226, 20228), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((21802, 21817), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (21815, 21817), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((8452, 8467), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (8465, 8467), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((19120, 19135), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (19133, 19135), False, 'from improver.wind_calculations.wind_direction import WindDirection\n'), ((19596, 19611), 'improver.wind_calculations.wind_direction.WindDirection', 'WindDirection', ([], {}), '()\n', (19609, 19611), False, 'from improver.wind_calculations.wind_direction import WindDirection\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import shutil
import itertools
from multiprocessing import Pool
import numpy as np
from dmriqcpy.analysis.stats import stats_mask_volume
from dmriqcpy.io.report import Report
from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
assert_inputs_exist, assert_outputs_exist)
from dmriqcpy.viz.graph import graph_mask_volume
from dmriqcpy.viz.screenshot import screenshot_mosaic_wrapper
from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
# Module-level description shown in the argparse --help output.
DESCRIPTION = """
Compute the tracking maps report in HTML format.
"""
def _build_arg_parser():
    """Build the argument parser for the tracking maps QC report.

    Returns
    -------
    argparse.ArgumentParser
        Parser exposing the tracking type, output report path, the mask/map
        inputs and the mosaic/threading options.
    """
    p = argparse.ArgumentParser(description=DESCRIPTION,
                                formatter_class=argparse.RawTextHelpFormatter)

    p.add_argument('tracking_type', choices=["pft", "local"],
                   help='Tracking type')
    p.add_argument('output_report',
                   help='HTML report')

    p.add_argument('--seeding_mask', nargs='+', required=True,
                   help='Seeding mask in Nifti format')
    # Required when tracking_type is "local" (validated in main()).
    p.add_argument('--tracking_mask', nargs='+',
                   help='Tracking mask in Nifti format')
    # Required when tracking_type is "pft" (validated in main()).
    p.add_argument('--map_include', nargs='+',
                   help='Map include in Nifti format')
    # Fixed typo in user-facing help text ("exlude" -> "exclude").
    p.add_argument('--map_exclude', nargs='+',
                   help='Map exclude in Nifti format')

    p.add_argument('--skip', default=2, type=int,
                   help='Number of images skipped to build the '
                        'mosaic. [%(default)s]')
    p.add_argument('--nb_columns', default=12, type=int,
                   help='Number of columns for the mosaic. [%(default)s]')
    p.add_argument('--nb_threads', type=int, default=1,
                   help='Number of threads. [%(default)s]')

    add_online_arg(p)
    add_overwrite_arg(p)

    return p
def _subj_parralel(subj_metric, summary, name, skip, nb_columns):
    """Build the report entry (screenshot + stats table) for one subject.

    Returns a one-entry dict mapping the metric filename to its mosaic
    screenshot path and its HTML summary table.
    """
    mosaic_path = screenshot_mosaic_wrapper(subj_metric, output_prefix=name,
                                            directory="data", skip=skip,
                                            nb_columns=nb_columns)
    stats_table = dataframe_to_html(summary.loc[subj_metric])
    return {subj_metric: {'screenshot': mosaic_path, 'stats': stats_table}}
def main():
    """Entry point: validate inputs and generate the tracking maps report.

    Fix: the optional --tracking_mask / --map_include / --map_exclude
    arguments default to None, so calling len() on them crashed with a
    TypeError when omitted; they are now checked explicitly and reported
    through parser.error().
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.tracking_type == "local":
        # Local tracking requires a tracking mask per subject.
        if args.tracking_mask is None:
            parser.error("--tracking_mask is required when "
                         "tracking_type is local.")
        if not len(args.seeding_mask) == len(args.tracking_mask):
            parser.error("Not the same number of images in input.")
        all_images = np.concatenate([args.seeding_mask, args.tracking_mask])
    else:
        # PFT tracking requires include and exclude maps per subject.
        if args.map_include is None or args.map_exclude is None:
            parser.error("--map_include and --map_exclude are required "
                         "when tracking_type is pft.")
        if not len(args.seeding_mask) == len(args.map_include) ==\
                len(args.map_exclude):
            parser.error("Not the same number of images in input.")
        all_images = np.concatenate([args.seeding_mask, args.map_include,
                                     args.map_exclude])

    assert_inputs_exist(parser, all_images)
    assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])

    # Recreate clean output directories for screenshots and report assets.
    if os.path.exists("data"):
        shutil.rmtree("data")
    os.makedirs("data")

    if os.path.exists("libs"):
        shutil.rmtree("libs")

    if args.tracking_type == "local":
        metrics_names = [[args.seeding_mask, 'Seeding mask'],
                         [args.tracking_mask, 'Tracking mask']]
    else:
        metrics_names = [[args.seeding_mask, 'Seeding mask'],
                         [args.map_include, 'Map include'],
                         [args.map_exclude, 'Maps exclude']]

    metrics_dict = {}
    summary_dict = {}
    graphs = []
    warning_dict = {}
    for metrics, name in metrics_names:
        columns = ["{} volume".format(name)]
        summary, stats = stats_mask_volume(columns, metrics)

        # Flag subjects whose volumes fall outside the expected range.
        warning_dict[name] = analyse_qa(summary, stats, columns)
        warning_list = np.concatenate(
            [filenames for filenames in warning_dict[name].values()])
        warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))

        graph = graph_mask_volume('{} mean volume'.format(name),
                                  columns, summary, args.online)
        graphs.append(graph)

        stats_html = dataframe_to_html(stats)
        summary_dict[name] = stats_html

        # Render one mosaic screenshot per subject in parallel.
        subjects_dict = {}
        pool = Pool(args.nb_threads)
        subjects_dict_pool = pool.starmap(_subj_parralel,
                                          zip(metrics,
                                              itertools.repeat(summary),
                                              itertools.repeat(name),
                                              itertools.repeat(args.skip),
                                              itertools.repeat(args.nb_columns)))
        pool.close()
        pool.join()

        for dict_sub in subjects_dict_pool:
            for key in dict_sub:
                subjects_dict[key] = dict_sub[key]
        metrics_dict[name] = subjects_dict

    nb_subjects = len(args.seeding_mask)
    report = Report(args.output_report)
    report.generate(title="Quality Assurance tracking maps",
                    nb_subjects=nb_subjects, summary_dict=summary_dict,
                    graph_array=graphs, metrics_dict=metrics_dict,
                    warning_dict=warning_dict,
                    online=args.online)
# Generate the report when run as a script.
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"dmriqcpy.viz.utils.dataframe_to_html",
"itertools.repeat",
"numpy.unique",
"argparse.ArgumentParser",
"os.makedirs",
"dmriqcpy.viz.utils.analyse_qa",
"dmriqcpy.io.report.Report",
"dmriqcpy.viz.screenshot.screenshot_mosaic_wrapper",
"dmriqcpy.io.utils.add_online_arg",
"dmriqcpy... | [((673, 773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=DESCRIPTION, formatter_class=argparse.\n RawTextHelpFormatter)\n', (696, 773), False, 'import argparse\n'), ((1833, 1850), 'dmriqcpy.io.utils.add_online_arg', 'add_online_arg', (['p'], {}), '(p)\n', (1847, 1850), False, 'from dmriqcpy.io.utils import add_online_arg, add_overwrite_arg, assert_inputs_exist, assert_outputs_exist\n'), ((1855, 1875), 'dmriqcpy.io.utils.add_overwrite_arg', 'add_overwrite_arg', (['p'], {}), '(p)\n', (1872, 1875), False, 'from dmriqcpy.io.utils import add_online_arg, add_overwrite_arg, assert_inputs_exist, assert_outputs_exist\n'), ((2003, 2117), 'dmriqcpy.viz.screenshot.screenshot_mosaic_wrapper', 'screenshot_mosaic_wrapper', (['subj_metric'], {'output_prefix': 'name', 'directory': '"""data"""', 'skip': 'skip', 'nb_columns': 'nb_columns'}), "(subj_metric, output_prefix=name, directory='data',\n skip=skip, nb_columns=nb_columns)\n", (2028, 2117), False, 'from dmriqcpy.viz.screenshot import screenshot_mosaic_wrapper\n'), ((2278, 2321), 'dmriqcpy.viz.utils.dataframe_to_html', 'dataframe_to_html', (['summary.loc[subj_metric]'], {}), '(summary.loc[subj_metric])\n', (2295, 2321), False, 'from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html\n'), ((3144, 3183), 'dmriqcpy.io.utils.assert_inputs_exist', 'assert_inputs_exist', (['parser', 'all_images'], {}), '(parser, all_images)\n', (3163, 3183), False, 'from dmriqcpy.io.utils import add_online_arg, add_overwrite_arg, assert_inputs_exist, assert_outputs_exist\n'), ((3188, 3260), 'dmriqcpy.io.utils.assert_outputs_exist', 'assert_outputs_exist', (['parser', 'args', "[args.output_report, 'data', 'libs']"], {}), "(parser, args, [args.output_report, 'data', 'libs'])\n", (3208, 3260), False, 'from dmriqcpy.io.utils import add_online_arg, add_overwrite_arg, assert_inputs_exist, assert_outputs_exist\n'), ((3269, 
3291), 'os.path.exists', 'os.path.exists', (['"""data"""'], {}), "('data')\n", (3283, 3291), False, 'import os\n'), ((3327, 3346), 'os.makedirs', 'os.makedirs', (['"""data"""'], {}), "('data')\n", (3338, 3346), False, 'import os\n'), ((3355, 3377), 'os.path.exists', 'os.path.exists', (['"""libs"""'], {}), "('libs')\n", (3369, 3377), False, 'import os\n'), ((5223, 5249), 'dmriqcpy.io.report.Report', 'Report', (['args.output_report'], {}), '(args.output_report)\n', (5229, 5249), False, 'from dmriqcpy.io.report import Report\n'), ((2773, 2828), 'numpy.concatenate', 'np.concatenate', (['[args.seeding_mask, args.tracking_mask]'], {}), '([args.seeding_mask, args.tracking_mask])\n', (2787, 2828), True, 'import numpy as np\n'), ((3030, 3101), 'numpy.concatenate', 'np.concatenate', (['[args.seeding_mask, args.map_include, args.map_exclude]'], {}), '([args.seeding_mask, args.map_include, args.map_exclude])\n', (3044, 3101), True, 'import numpy as np\n'), ((3301, 3322), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (3314, 3322), False, 'import shutil\n'), ((3387, 3408), 'shutil.rmtree', 'shutil.rmtree', (['"""libs"""'], {}), "('libs')\n", (3400, 3408), False, 'import shutil\n'), ((3959, 3994), 'dmriqcpy.analysis.stats.stats_mask_volume', 'stats_mask_volume', (['columns', 'metrics'], {}), '(columns, metrics)\n', (3976, 3994), False, 'from dmriqcpy.analysis.stats import stats_mask_volume\n'), ((4025, 4060), 'dmriqcpy.viz.utils.analyse_qa', 'analyse_qa', (['summary', 'stats', 'columns'], {}), '(summary, stats, columns)\n', (4035, 4060), False, 'from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html\n'), ((4412, 4436), 'dmriqcpy.viz.utils.dataframe_to_html', 'dataframe_to_html', (['stats'], {}), '(stats)\n', (4429, 4436), False, 'from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html\n'), ((4520, 4541), 'multiprocessing.Pool', 'Pool', (['args.nb_threads'], {}), '(args.nb_threads)\n', (4524, 4541), False, 'from multiprocessing import Pool\n'), 
((4205, 4228), 'numpy.unique', 'np.unique', (['warning_list'], {}), '(warning_list)\n', (4214, 4228), True, 'import numpy as np\n'), ((4701, 4726), 'itertools.repeat', 'itertools.repeat', (['summary'], {}), '(summary)\n', (4717, 4726), False, 'import itertools\n'), ((4774, 4796), 'itertools.repeat', 'itertools.repeat', (['name'], {}), '(name)\n', (4790, 4796), False, 'import itertools\n'), ((4844, 4871), 'itertools.repeat', 'itertools.repeat', (['args.skip'], {}), '(args.skip)\n', (4860, 4871), False, 'import itertools\n'), ((4919, 4952), 'itertools.repeat', 'itertools.repeat', (['args.nb_columns'], {}), '(args.nb_columns)\n', (4935, 4952), False, 'import itertools\n')] |
from scipy.sparse import csc_matrix
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
class Dispersion(object):
    """
    Per-term dispersion statistics over the parts (documents) of a corpus.

    From https://www.researchgate.net/publication/332120488_Analyzing_dispersion
    <NAME>. Analyzing dispersion. April 2019. Practical handbook of corpus linguistics. Springer.
    Parts are considered documents.
    """
    def __init__(self, corpus=None, term_doc_mat=None):
        """
        :param corpus: optional corpus object exposing get_term_doc_mat()
            (and get_terms(), used by get_df)
        :param term_doc_mat: sparse term-document (part) matrix, rows are
            parts/documents and columns are terms; used when corpus is None
        """
        '''
        following Gries' notation, for the following example:

        b a m n i b e u p
        b a s a t b e w q n
        b c a g a b e s t a
        b a g h a b e a a t
        b a h a a b e a x a t

        (1) l = 50 (the length of the corpus in words)
        (2) n = 5 (the length of the corpus in parts)
        (3) s = (0.18, 0.2, 0.2, 0.2, 0.22) (the percentages of the n corpus part sizes)
        (4) f = 15 (the overall frequency of a in the corpus)
        (5) v = (1, 2, 3, 4, 5) (the frequencies of a in each corpus part 1-n)
        (6) p = (1/9, 2/10, 3/10, 4/10, 5 /11) (the percentages a makes up of each corpus part 1-n)
        '''
        self.corpus = None
        if corpus is not None:
            self.corpus = corpus
            X = corpus.get_term_doc_mat()
        else:
            X = term_doc_mat
        part_sizes = X.sum(axis=1)
        self.l = X.sum().sum()        # (1) corpus length in tokens
        self.n = X.shape[0]           # (2) number of parts
        self.f = X.sum(axis=0)        # (4) overall frequency of each term
        self.v = X                    # (5) per-part frequencies
        # (6) share each term makes up of every part
        self.p = X.multiply(csc_matrix(1. / X.sum(axis=1)))
        self.s = part_sizes / self.l  # (3) relative part sizes

    def dispersion_range(self):
        """
        range: number of parts containing the term (= 5 for `a` in the example)
        """
        return (self.v > 0).sum(axis=0).A1

    def sd_population(self):
        # Population (biased, divide-by-n) standard deviation of the
        # per-part frequencies; sklearn's var_ is the population variance.
        return np.sqrt(StandardScaler(with_mean=False).fit(self.v).var_)

    def vc(self):
        """
        Direct quote from Gries (2019)

        A maybe more useful variant of this measure is its normalized version, the variation
        coefficient (vc, see (9)); the normalization consists of dividing sdpopulation by the mean frequency
        of the element in the corpus parts f/n:
        """
        ss = StandardScaler(with_mean=False).fit(self.v)
        return np.sqrt(ss.var_) / ss.mean_

    def jullands_d(self):
        """
        The version of Juilland's D that can handle differently large corpus parts is then computed
        as shown in (10). In order to accommodate the different sizes of the corpus parts, however, the
        variation coefficient is not computed using the observed frequencies v1-n (i.e. 1, 2, 3, 4, 5 in files
        1 to 5 respectively, see (5) above) but using the percentages in p1-n (i.e. how much of each corpus
        part is made up by the element in question, i.e. 1/9, 2/10, 3/10, 4/10, 5/11, see (6) above), which is what
        corrects for differently large corpus parts:
        """
        ss = StandardScaler(with_mean=False).fit(self.p)
        return 1 - (np.sqrt(ss.var_) / ss.mean_) / np.sqrt(self.n - 1)

    def rosengrens(self):
        '''
        The version of Rosengren's S that can handle differently large corpus parts is
        shown in (12). Each corpus part size's in percent (in s) is multiplied with the
        frequencies of the element in question in each corpus part (in v1-n); of each product,
        one takes the square root, and those are summed up, that sum is squared, and divided
        by the overall frequency of the element in question in the corpus (f)'''
        return np.power(
            np.sqrt(self.v.multiply(self.s)).sum(axis=0).A1,
            2
        ) * 1. / self.get_frequency()

    def dp(self):
        '''
        Finally, Gries (2008, 2010) and the follow-up by Lijffijt and Gries (2012)
        proposed a measure called DP (for deviation of proportions), which falls between
        1-min s (for an extremely even distribution) and 1 (for an extremely clumpy
        distribution) as well as a normalized version of DP, DPnorm, which falls between 0
        and 1, which are computed as shown in (13). For DP, one computes the differences
        between how much of the element in question is in each corpus file in percent on the
        one hand and the sizes of the corpus parts in percent on the other - i.e. the differences
        between observed and expected percentages. Then, one adds up the absolute values
        of those and multiplies by 0.5; the normalization then consists of dividing this values
        by the theoretically maximum value of DP given the number of corpus parts (in a
        way reminiscent of (11)'''
        return np.sum(np.abs(self.v.multiply(1. / self.get_frequency()) - self.s),
                      axis=0).A1 / 2

    def dp_norm(self):
        # DP normalized by its theoretical maximum (1 - smallest part share).
        return self.dp() / (1 - self.s.min())

    def kl_divergence(self):
        '''The final measure to be discussed here is one that, as far as I can tell, has never
        been proposed as a measure of dispersion, but seems to me to be ideally suited to be
        one, namely the Kullback-Leibler (or KL-) divergence, a non-symmetric measure
        that quantifies how different one probability distribution (e.g., the distribution of
        all the occurrences of a across all corpus parts, i.e. v/f) is from another (e.g., the
        corpus part sizes s); the KL-divergence is computed as shown in (14) (with log2s of
        0 defined as 0):'''
        vf = self.v.multiply(1. / self.f)
        vfs = vf.multiply(1. / self.s)
        # base-2 logarithm applied only to the stored nonzeros, so 0-entries
        # contribute 0 as the definition requires
        vfs.data = np.log(vfs.data) / np.log(2)
        return np.sum(vf.multiply(vfs), axis=0).A1

    def da(self):
        '''
        Metrics from Burch (2017).

        <NAME>, <NAME> and <NAME>. Measuring Lexical Dispersion in Corpus Linguistics. JRDS. 2016.

        Article: https://journal.equinoxpub.com/JRDS/article/view/9480

        D_A = 1 - (mean pairwise |v_i - v_j|) / (2 * mean(v))
            = 1 - (sum_{i<j} |v_i - v_j| / (n * (n - 1) / 2)) / (2 * mean(v))

        :return: np.array of D_A scores, one per term
        '''
        n = self.n
        constant = 1./(n * (n - 1)/2)
        da = []
        for word_i in range(self.v.shape[1]):
            y = self.v.T[word_i].todense().A1
            yt = np.tile(y, (n, 1))
            # sum of |v_i - v_j| over unordered part pairs
            s = np.sum(np.abs(yt - yt.T)) / 2
            m = y.mean()
            # Bug fix: divide by (2 * mean) per the D_A definition above; the
            # previous code multiplied by the mean, which could push D_A far
            # outside [0, 1]. An all-zero column keeps the old result of 1.
            da.append(1.0 if m == 0 else 1 - constant * s / (2 * m))
        return np.array(da)

    def get_df(self, terms = None):
        """
        Assemble all dispersion measures into a DataFrame, one row per term.

        :param terms: optional row index; defaults to the corpus' terms when
            a corpus was supplied
        """
        if terms is None and self.corpus is not None:
            terms = self.corpus.get_terms()
        df_content = {
            'Frequency': self.get_frequency(),
            'Range': self.dispersion_range(),
            'SD': self.sd_population(),
            'VC': self.vc(),
            "Juilland's D": self.jullands_d(),
            "Rosengren's S": self.rosengrens(),
            'DP': self.dp(),
            'DP norm': self.dp_norm(),
            'KL-divergence': self.kl_divergence()
        }
        if terms is None:
            return pd.DataFrame(df_content)
        return pd.DataFrame(df_content, index=terms)

    def get_frequency(self):
        # Overall corpus frequency of each term, as a flat array.
        return self.f.A1
| [
"numpy.tile",
"numpy.abs",
"numpy.sqrt",
"numpy.log",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"pandas.DataFrame"
] | [((6308, 6320), 'numpy.array', 'np.array', (['da'], {}), '(da)\n', (6316, 6320), True, 'import numpy as np\n'), ((6949, 6986), 'pandas.DataFrame', 'pd.DataFrame', (['df_content'], {'index': 'terms'}), '(df_content, index=terms)\n', (6961, 6986), True, 'import pandas as pd\n'), ((2244, 2260), 'numpy.sqrt', 'np.sqrt', (['ss.var_'], {}), '(ss.var_)\n', (2251, 2260), True, 'import numpy as np\n'), ((5537, 5553), 'numpy.log', 'np.log', (['vfs.data'], {}), '(vfs.data)\n', (5543, 5553), True, 'import numpy as np\n'), ((5556, 5565), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (5562, 5565), True, 'import numpy as np\n'), ((6170, 6188), 'numpy.tile', 'np.tile', (['y', '(n, 1)'], {}), '(y, (n, 1))\n', (6177, 6188), True, 'import numpy as np\n'), ((6909, 6933), 'pandas.DataFrame', 'pd.DataFrame', (['df_content'], {}), '(df_content)\n', (6921, 6933), True, 'import pandas as pd\n'), ((2185, 2216), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (2199, 2216), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2928, 2959), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (2942, 2959), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3023, 3042), 'numpy.sqrt', 'np.sqrt', (['(self.n - 1)'], {}), '(self.n - 1)\n', (3030, 3042), True, 'import numpy as np\n'), ((2992, 3008), 'numpy.sqrt', 'np.sqrt', (['ss.var_'], {}), '(ss.var_)\n', (2999, 3008), True, 'import numpy as np\n'), ((6212, 6229), 'numpy.abs', 'np.abs', (['(yt - yt.T)'], {}), '(yt - yt.T)\n', (6218, 6229), True, 'import numpy as np\n'), ((1790, 1821), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (1804, 1821), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import sys
sys.path.append("Mask_RCNN")
import os
import sys
import glob
import osmmodelconfig
import skimage
import math
import imagestoosm.config as osmcfg
import model as modellib
import visualize as vis
import numpy as np
import csv
import QuadKey.quadkey as quadkey
import shapely.geometry as geometry
import shapely.affinity as affinity
import matplotlib.pyplot as plt
import cv2
import scipy.optimize
import time
from skimage import draw
from skimage import io
# Module-wide debug switch: when True, each contour/fit stage is also shown
# in a matplotlib figure window.
showFigures = False
def toDegrees(rad):
    """Convert an angle from radians to degrees."""
    scaled = rad * 180
    return scaled / math.pi
def writeOSM( osmFileName,featureName, simpleContour,tilePixel, qkRoot) :
    """
    Write one closed way as a standalone OSM XML file, tagged
    leisure=pitch / sport=<featureName>.

    :param osmFileName: output path for the .osm file
    :param featureName: value for the sport tag (e.g. "baseball")
    :param simpleContour: OpenCV-style contour, shape (N, 1, 2), in tile pixels
    :param tilePixel: global pixel offset of the tile's top-left corner
    :param qkRoot: quadkey of the tile, supplies the zoom level for pixel->geo
    """
    with open(osmFileName,"wt",encoding="ascii") as f:
        f.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        f.write("<osm version=\"0.6\">\n")
        # One node per contour point, with sequential negative ids
        # (negative ids are the OSM convention for not-yet-uploaded objects).
        id = -1
        for pt in simpleContour :
            geo = quadkey.TileSystem.pixel_to_geo( (pt[0,0]+tilePixel[0],pt[0,1]+tilePixel[1]),qkRoot.level)
            f.write(" <node id=\"{}\" lat=\"{}\" lon=\"{}\" />\n".format(id,geo[0],geo[1]))
            id -= 1
        # The way itself takes the next unused negative id.
        f.write(" <way id=\"{}\" visible=\"true\">\n".format(id))
        id = -1
        for pt in simpleContour :
            f.write(" <nd ref=\"{}\" />\n".format(id))
            id -= 1
        # Repeat the first node reference to close the polygon.
        f.write(" <nd ref=\"{}\" />\n".format(-1))
        f.write(" <tag k=\"{}\" v=\"{}\" />\n".format("leisure","pitch"))
        f.write(" <tag k=\"{}\" v=\"{}\" />\n".format("sport",featureName))
        f.write(" </way>\n")
        f.write("</osm>\n")
    # Removed a dead `f.close` statement: without parentheses it only
    # evaluated the bound method, and the `with` block already closes f.
def writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth) :
    """
    Sample a fitted shapely outline into an OpenCV contour, save a debug JPEG
    crop plus a matching .osm file under anomaly/add/, and return the
    (possibly advanced) way counter.

    Relies on module-level globals: imageNoMasks, showFigures, fig,
    featureName, r, i, inference_config, tilePixel, qkRoot.

    :param wayNumber: first candidate number for the output file names
    :param finalShape: shapely geometry whose boundary is written out
    :param image: current visualization frame (see NOTE below)
    :param bbTop, bbHeight, bbLeft, bbWidth: crop rectangle for debug images
    :return: the way number actually used
    """
    # Walk the shape boundary at ~1 pixel steps, capped at 5000 samples.
    nPts = int(finalShape.length)
    if ( nPts > 5000) :
        nPts = 5000
    fitContour = np.zeros((nPts,1,2), dtype=np.int32)
    if ( nPts > 3):
        for t in range(0,nPts) :
            pt = finalShape.interpolate(t)
            fitContour[t,0,0] = pt.x
            fitContour[t,0,1] = pt.y
        fitContour = [ fitContour ]
        # Collapse the dense sampling to a polygon with ~2px tolerance.
        fitContour = [ cv2.approxPolyDP(cnt,2,True) for cnt in fitContour]
        # NOTE(review): the `image` parameter is discarded here in favor of
        # the global mask-free frame -- confirm that is intentional.
        image = np.copy(imageNoMasks)
        cv2.drawContours(image, fitContour,-1, (0,255,0), 2)
        if ( showFigures ):
            fig.add_subplot(2,2,3)
            plt.title(featureName + " " + str(r['scores'][i]) + " Fit")
            plt.imshow(image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth])
        # Advance to the next way number not already used on disk.
        while ( os.path.exists( "anomaly/add/{0:06d}.osm".format(wayNumber) )) :
            wayNumber += 1
        debugFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.jpg".format(wayNumber))
        io.imsave(debugFileName,image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth],quality=100)
        osmFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.osm".format(wayNumber))
        writeOSM( osmFileName,featureName, fitContour[0],tilePixel, qkRoot)
        if (showFigures ):
            plt.show(block=False)
            plt.pause(0.05)
    return wayNumber
# Resolve the project root from the script's own location; model
# checkpoints/logs live in the sibling "logs" directory.
ROOT_DIR_ = os.path.dirname(os.path.realpath(sys.argv[0]))
MODEL_DIR = os.path.join(ROOT_DIR_, "logs")
class InferenceConfig(osmmodelconfig.OsmModelConfig):
    # Inference-time overrides: detect on a single image per batch.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    ROOT_DIR = ROOT_DIR_  # project root resolved from the script location above
inference_config = InferenceConfig()
# Collect every sub-directory of the training data dir; each directory name
# is one image id.
fullTrainingDir = os.path.join( ROOT_DIR_, osmcfg.trainDir,"*")
fullImageList = []
for imageDir in glob.glob(fullTrainingDir):
    # NOTE(review): glob already yields full paths, so the second argument of
    # the inner join wins and the join is effectively redundant -- confirm.
    if ( os.path.isdir( os.path.join( fullTrainingDir, imageDir) )):
        id = os.path.split(imageDir)[1]  # NOTE: shadows the builtin `id`
        fullImageList.append( id)
# Training dataset
dataset_full = osmmodelconfig.OsmImagesDataset(ROOT_DIR_)
dataset_full.load(fullImageList, inference_config.IMAGE_SHAPE[0], inference_config.IMAGE_SHAPE[1])
dataset_full.prepare()
inference_config.display()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()[1]  # [1] picks the checkpoint path -- presumably find_last() returns (dir, path); verify
print(model_path)
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
print("Reading in OSM data")
# load up the OSM features into hash of arrays of polygons, in pixels
features = {}
for classDir in os.listdir(osmcfg.rootOsmDir) :
    classDirFull = os.path.join( osmcfg.rootOsmDir,classDir)
    for fileName in os.listdir(classDirFull) :
        fullPath = os.path.join( osmcfg.rootOsmDir,classDir,fileName)
        with open(fullPath, "rt") as csvfile:
            # Each file holds one way as tab-separated lat/lon vertex rows.
            csveader = csv.reader(csvfile, delimiter='\t')
            pts = []
            for row in csveader:
                latLot = (float(row[0]),float(row[1]))
                # convert to global pixel coordinates at the working zoom
                pixel = quadkey.TileSystem.geo_to_pixel(latLot,osmcfg.tileZoom)
                pts.append(pixel)
            feature = {
                "geometry" : geometry.Polygon(pts),
                "filename" : fullPath
            }
            if ( (classDir in features) == False) :
                features[classDir] = []
            features[classDir].append( feature )
# make the output dirs, a fresh start is possible just by deleting anomaly
if ( not os.path.isdir("anomaly")) :
    os.mkdir("anomaly")
if ( not os.path.isdir("anomaly/add")) :
    os.mkdir("anomaly/add")
if ( not os.path.isdir("anomaly/replace")) :
    os.mkdir("anomaly/replace")
if ( not os.path.isdir("anomaly/overlap")) :
    os.mkdir("anomaly/overlap")
fig = {}
if ( showFigures):
    fig = plt.figure()
wayNumber = 0
startTime = time.time()
count = 1
# Main loop: run Mask R-CNN on every training tile and, for confident
# detections that overlap no mapped OSM feature, fit a shape and emit an
# anomaly/add candidate.
for image_index in dataset_full.image_ids :
    # Progress / ETA bookkeeping.
    currentTime = time.time()
    howLong = currentTime-startTime
    secPerImage = howLong/count
    imagesLeft = len(dataset_full.image_ids)-count
    timeLeftHrs = (imagesLeft*secPerImage)/3600.0
    print("Processing {} of {} {:2.1f} hrs left".format(count,len(dataset_full.image_ids),timeLeftHrs))
    count += 1
    image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_full, inference_config,image_index, use_mini_mask=False)
    info = dataset_full.image_info[image_index]
    # get the pixel location for this training image.
    metaFileName = os.path.join( inference_config.ROOT_DIR, osmcfg.trainDir,info['id'],info['id']+".txt")
    quadKeyStr = ""
    with open(metaFileName) as metafile:
        quadKeyStr = metafile.readline()
    quadKeyStr = quadKeyStr.strip()
    qkRoot = quadkey.from_str(quadKeyStr)
    tilePixel = quadkey.TileSystem.geo_to_pixel(qkRoot.to_geo(), qkRoot.level)
    # run the network
    results = model.detect([image], verbose=0)
    r = results[0]
    maxImageSize = 256*3
    featureMask = np.zeros((maxImageSize, maxImageSize), dtype=np.uint8)
    # Bounding box of this tile in global pixel coordinates.
    pts = []
    pts.append( ( tilePixel[0]+0,tilePixel[1]+0 ) )
    pts.append( ( tilePixel[0]+0,tilePixel[1]+maxImageSize ) )
    pts.append( ( tilePixel[0]+maxImageSize,tilePixel[1]+maxImageSize ) )
    pts.append( ( tilePixel[0]+maxImageSize,tilePixel[1]+0 ) )
    imageBoundingBoxPoly = geometry.Polygon(pts)
    # Collect the OSM features that fall completely inside this tile.
    foundFeatures = {}
    for featureType in osmmodelconfig.featureNames.keys() :
        foundFeatures[featureType ] = []
        for feature in features[featureType] :
            if ( imageBoundingBoxPoly.intersects( feature['geometry']) ) :
                xs, ys = feature['geometry'].exterior.coords.xy
                outOfRangeCount = len([ x for x in xs if x < tilePixel[0] or x >= tilePixel[0]+maxImageSize ])
                outOfRangeCount += len([ y for y in ys if y < tilePixel[1] or y >= tilePixel[1]+maxImageSize ])
                if ( outOfRangeCount == 0) :
                    foundFeatures[featureType ].append( feature)
    # draw black lines showing where osm data is
    for featureType in osmmodelconfig.featureNames.keys() :
        for feature in foundFeatures[featureType] :
            xs, ys = feature['geometry'].exterior.coords.xy
            xs = [ x-tilePixel[0] for x in xs]
            ys = [ y-tilePixel[1] for y in ys]
            rr, cc = draw.polygon_perimeter(xs,ys,(maxImageSize,maxImageSize))
            image[cc,rr] = 0
    imageNoMasks = np.copy(image)
    for i in range( len(r['class_ids'])) :
        mask = r['masks'][:,:,i]
        # Count of mask pixels within a 15px border band; non-zero means the
        # detection touches the tile edge and is skipped.
        edgePixels = 15
        outside = np.sum( mask[0:edgePixels,:]) + np.sum( mask[-edgePixels:-1,:]) + np.sum( mask[:,0:edgePixels]) + np.sum( mask[:,-edgePixels:-1])
        image = np.copy(imageNoMasks)
        if ( r['scores'][i] > 0.98 and outside == 0 ) :
            # Does this high-confidence detection overlap any already-mapped
            # OSM feature of the same class? If not, it is an "add" anomaly.
            featureFound = False
            for featureType in osmmodelconfig.featureNames.keys() :
                for feature in foundFeatures[featureType] :
                    classId = osmmodelconfig.featureNames[featureType]
                    if ( classId == r['class_ids'][i] ) :
                        xs, ys = feature['geometry'].exterior.coords.xy
                        xs = [ x-tilePixel[0] for x in xs]
                        ys = [ y-tilePixel[1] for y in ys]
                        xsClipped = [ min( max( x,0),maxImageSize) for x in xs]
                        ysClipped = [ min( max( y,0),maxImageSize) for y in ys]
                        featureMask.fill(0)
                        rr, cc = draw.polygon(xs,ys,(maxImageSize,maxImageSize))
                        featureMask[cc,rr] = 1
                        maskAnd = featureMask * mask
                        overlap = np.sum(maskAnd )
                        if ( outside == 0 and overlap > 0) :
                            featureFound = True
            if ( featureFound == False) :
                weight = 0.25
                # get feature name
                featureName = ""
                for featureType in osmmodelconfig.featureNames.keys() :
                    if ( osmmodelconfig.featureNames[featureType] == r['class_ids'][i] ) :
                        featureName = featureType
                #if ( r['class_ids'][i] == 1):
                # vis.apply_mask(image,mask,[weight,0,0])
                #if ( r['class_ids'][i] == 2):
                # vis.apply_mask(image,mask,[weight,weight,0])
                #if ( r['class_ids'][i] == 3):
                # vis.apply_mask(image,mask,[0.0,0,weight])
                # Trace the outline of the predicted mask.
                mask = mask.astype(np.uint8)
                mask = mask * 255
                ret,thresh = cv2.threshold(mask,127,255,0)
                im2, rawContours,h = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
                bbLeft,bbTop,bbWidth,bbHeight = cv2.boundingRect(rawContours[0])
                # Pad the crop used for the debug images.
                bbBuffer = 75
                bbLeft = max(bbLeft-bbBuffer,0)
                bbRight = min(bbLeft+2*bbBuffer+bbWidth,maxImageSize)
                bbWidth = bbRight-bbLeft
                bbTop = max(bbTop-bbBuffer,0)
                bbBottom = min(bbTop+2*bbBuffer+bbHeight,maxImageSize-1)
                bbHeight = bbBottom-bbTop
                image = np.copy(imageNoMasks)
                cv2.drawContours(image, rawContours,-1, (0,255,0), 2)
                if ( showFigures ):
                    fig.add_subplot(2,2,1)
                    plt.title(featureName + " " + str(r['scores'][i]) + " Raw")
                    plt.imshow(image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth])
                simpleContour = [ cv2.approxPolyDP(cnt,5,True) for cnt in rawContours]
                image = np.copy(imageNoMasks)
                cv2.drawContours(image, simpleContour,-1, (0,255,0), 2)
                if ( showFigures ):
                    fig.add_subplot(2,2,2)
                    plt.title(featureName + " " + str(r['scores'][i]) + " Simplify")
                    plt.imshow(image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth])
                simpleContour = simpleContour[0]
                print(" {}".format(featureName))
                if ( featureName == "baseball" and isinstance(simpleContour,np.ndarray) ):
                    # Also save the raw simplified outline for baseball fields.
                    while ( os.path.exists( "anomaly/add/{0:06d}.osm".format(wayNumber) )) :
                        wayNumber += 1
                    debugFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.jpg".format(wayNumber))
                    io.imsave(debugFileName,image[bbTop:bbTop+bbHeight,bbLeft:bbLeft+bbWidth],quality=100)
                    osmFileName = os.path.join( inference_config.ROOT_DIR, "anomaly","add","{0:06d}.osm".format(wayNumber))
                    writeOSM( osmFileName,featureName, simpleContour,tilePixel, qkRoot)
                fitContour = simpleContour
                if ( featureName == 'baseball' ) :
                    # Fit a quarter-circle "pie" (baseball diamond) to the contour.
                    def makePie(paramsX):
                        # Build a pie outline at (centerX, centerY), rotated by
                        # `angle` degrees, with radius `width`.
                        centerX,centerY,width,angle = paramsX
                        pts = []
                        pts.append((0,0))
                        pts.append((width,0))
                        step = math.pi/10
                        r = step
                        while r < math.pi/2:
                            x = math.cos(r)*width
                            y = math.sin(r)*width
                            pts.append( (x,y) )
                            r += step
                        pts.append( (0,width))
                        pts.append( (0,0))
                        fitShape = geometry.LineString(pts)
                        fitShape = affinity.translate(fitShape, -width/2,-width/2 )
                        fitShape = affinity.rotate(fitShape,angle )
                        fitShape = affinity.translate(fitShape, centerX,centerY )
                        return fitShape
                    def fitPie(paramsX):
                        # Huber loss of contour-point distances to the pie outline.
                        fitShape = makePie(paramsX)
                        huberCutoff = 5
                        sum = 0
                        for cnt in rawContours:
                            for pt in cnt:
                                p = geometry.Point(pt[0])
                                d = p.distance(fitShape)
                                if ( d < huberCutoff) :
                                    sum += 0.5 * d * d
                                else:
                                    sum += huberCutoff*(math.fabs(d)-0.5*huberCutoff)
                        return sum
                    # Multi-start Nelder-Mead over several initial rotations.
                    cm = np.mean( rawContours[0],axis=0)
                    results = []
                    angleStepCount = 8
                    for angleI in range(angleStepCount):
                        centerX = cm[0,0]
                        centerY = cm[0,1]
                        width = math.sqrt(cv2.contourArea(rawContours[0]))
                        angle = 360 * float(angleI)/angleStepCount
                        x0 = np.array([centerX,centerY,width,angle ])
                        resultR = scipy.optimize.minimize(fitPie, x0, method='nelder-mead', options={'xtol': 1e-6,'maxiter':50 })
                        results.append(resultR)
                    bestScore = 1e100
                    bestResult = {}
                    for result in results:
                        if result.fun < bestScore :
                            bestScore = result.fun
                            bestResult = result
                    # Polish the best start without the iteration cap.
                    bestResult = scipy.optimize.minimize(fitPie, bestResult.x, method='nelder-mead', options={'xtol': 1e-6 })
                    finalShape = makePie(bestResult.x)
                    wayNumber = writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth)
                    # Also emit near-best fits at substantially different angles.
                    for result in results:
                        angle = result.x[3]
                        angleDelta = int(math.fabs(result.x[3]-bestResult.x[3])) % 360
                        if result.fun < 1.2*bestScore and angleDelta > 45 :
                            result = scipy.optimize.minimize(fitPie, result.x, method='nelder-mead', options={'xtol': 1e-6 })
                            finalShape = makePie(result.x)
                            wayNumber = writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth)
                else:
                    # All other sports: fit a rotated rectangle.
                    def makeRect(paramsX):
                        # Rectangle outline centered at (centerX, centerY),
                        # rotated by `angle` radians.
                        centerX,centerY,width,height,angle = paramsX
                        pts = [
                            (-width/2,height/2),
                            (width/2,height/2),
                            (width/2,-height/2),
                            (-width/2,-height/2),
                            (-width/2,height/2)]
                        fitShape = geometry.LineString(pts)
                        fitShape = affinity.rotate(fitShape, angle,use_radians=True )
                        fitShape = affinity.translate(fitShape, centerX,centerY )
                        return fitShape
                    def fitRect(paramsX):
                        # Sum of squared contour-point distances to the rectangle.
                        fitShape = makeRect(paramsX)
                        sum = 0
                        for cnt in rawContours:
                            for pt in cnt:
                                p = geometry.Point(pt[0])
                                d = p.distance(fitShape)
                                sum += d*d
                        return sum
                    cm = np.mean( rawContours[0],axis=0)
                    result = {}
                    angleStepCount = 8
                    for angleI in range(angleStepCount):
                        centerX = cm[0,0]
                        centerY = cm[0,1]
                        width = math.sqrt(cv2.contourArea(rawContours[0]))
                        height = width
                        angle = 2*math.pi * float(angleI)/angleStepCount
                        x0 = np.array([centerX,centerY,width,height,angle ])
                        resultR = scipy.optimize.minimize(fitRect, x0, method='nelder-mead', options={'xtol': 1e-6,'maxiter':50 })
                        if ( angleI == 0):
                            result = resultR
                        if ( resultR.fun < result.fun):
                            result = resultR
                        #print("{} {}".format(angle * 180.0 / math.pi,resultR.fun ))
                        # NOTE(review): this refined resultR is never compared
                        # against `result` or stored, so the polish is
                        # discarded -- confirm whether it should update result.
                        resultR = scipy.optimize.minimize(fitRect, resultR.x, method='nelder-mead', options={'xtol': 1e-6 })
                    #print(result)
                    finalShape = makeRect(result.x)
                    wayNumber = writeShape(wayNumber, finalShape, image, bbTop,bbHeight,bbLeft,bbWidth)
| [
"shapely.geometry.Point",
"math.cos",
"numpy.array",
"shapely.geometry.Polygon",
"cv2.approxPolyDP",
"QuadKey.quadkey.TileSystem.geo_to_pixel",
"sys.path.append",
"matplotlib.pyplot.imshow",
"numpy.mean",
"os.listdir",
"cv2.threshold",
"model.load_image_gt",
"os.path.split",
"cv2.contourAr... | [((11, 39), 'sys.path.append', 'sys.path.append', (['"""Mask_RCNN"""'], {}), "('Mask_RCNN')\n", (26, 39), False, 'import sys\n'), ((3072, 3103), 'os.path.join', 'os.path.join', (['ROOT_DIR_', '"""logs"""'], {}), "(ROOT_DIR_, 'logs')\n", (3084, 3103), False, 'import os\n'), ((3282, 3327), 'os.path.join', 'os.path.join', (['ROOT_DIR_', 'osmcfg.trainDir', '"""*"""'], {}), "(ROOT_DIR_, osmcfg.trainDir, '*')\n", (3294, 3327), False, 'import os\n'), ((3363, 3389), 'glob.glob', 'glob.glob', (['fullTrainingDir'], {}), '(fullTrainingDir)\n', (3372, 3389), False, 'import glob\n'), ((3571, 3613), 'osmmodelconfig.OsmImagesDataset', 'osmmodelconfig.OsmImagesDataset', (['ROOT_DIR_'], {}), '(ROOT_DIR_)\n', (3602, 3613), False, 'import osmmodelconfig\n'), ((3812, 3898), 'model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (3829, 3898), True, 'import model as modellib\n'), ((4488, 4517), 'os.listdir', 'os.listdir', (['osmcfg.rootOsmDir'], {}), '(osmcfg.rootOsmDir)\n', (4498, 4517), False, 'import os\n'), ((5742, 5753), 'time.time', 'time.time', ([], {}), '()\n', (5751, 5753), False, 'import time\n'), ((1720, 1758), 'numpy.zeros', 'np.zeros', (['(nPts, 1, 2)'], {'dtype': 'np.int32'}), '((nPts, 1, 2), dtype=np.int32)\n', (1728, 1758), True, 'import numpy as np\n'), ((3029, 3058), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (3045, 3058), False, 'import os\n'), ((4539, 4580), 'os.path.join', 'os.path.join', (['osmcfg.rootOsmDir', 'classDir'], {}), '(osmcfg.rootOsmDir, classDir)\n', (4551, 4580), False, 'import os\n'), ((4601, 4625), 'os.listdir', 'os.listdir', (['classDirFull'], {}), '(classDirFull)\n', (4611, 4625), False, 'import os\n'), ((5387, 5411), 'os.path.isdir', 'os.path.isdir', (['"""anomaly"""'], {}), "('anomaly')\n", (5400, 5411), False, 'import os\n'), ((5419, 5438), 
'os.mkdir', 'os.mkdir', (['"""anomaly"""'], {}), "('anomaly')\n", (5427, 5438), False, 'import os\n'), ((5448, 5476), 'os.path.isdir', 'os.path.isdir', (['"""anomaly/add"""'], {}), "('anomaly/add')\n", (5461, 5476), False, 'import os\n'), ((5484, 5507), 'os.mkdir', 'os.mkdir', (['"""anomaly/add"""'], {}), "('anomaly/add')\n", (5492, 5507), False, 'import os\n'), ((5517, 5549), 'os.path.isdir', 'os.path.isdir', (['"""anomaly/replace"""'], {}), "('anomaly/replace')\n", (5530, 5549), False, 'import os\n'), ((5557, 5584), 'os.mkdir', 'os.mkdir', (['"""anomaly/replace"""'], {}), "('anomaly/replace')\n", (5565, 5584), False, 'import os\n'), ((5594, 5626), 'os.path.isdir', 'os.path.isdir', (['"""anomaly/overlap"""'], {}), "('anomaly/overlap')\n", (5607, 5626), False, 'import os\n'), ((5634, 5661), 'os.mkdir', 'os.mkdir', (['"""anomaly/overlap"""'], {}), "('anomaly/overlap')\n", (5642, 5661), False, 'import os\n'), ((5701, 5713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5711, 5713), True, 'import matplotlib.pyplot as plt\n'), ((5827, 5838), 'time.time', 'time.time', ([], {}), '()\n', (5836, 5838), False, 'import time\n'), ((6184, 6276), 'model.load_image_gt', 'modellib.load_image_gt', (['dataset_full', 'inference_config', 'image_index'], {'use_mini_mask': '(False)'}), '(dataset_full, inference_config, image_index,\n use_mini_mask=False)\n', (6206, 6276), True, 'import model as modellib\n'), ((6394, 6488), 'os.path.join', 'os.path.join', (['inference_config.ROOT_DIR', 'osmcfg.trainDir', "info['id']", "(info['id'] + '.txt')"], {}), "(inference_config.ROOT_DIR, osmcfg.trainDir, info['id'], info[\n 'id'] + '.txt')\n", (6406, 6488), False, 'import os\n'), ((6636, 6664), 'QuadKey.quadkey.from_str', 'quadkey.from_str', (['quadKeyStr'], {}), '(quadKeyStr)\n', (6652, 6664), True, 'import QuadKey.quadkey as quadkey\n'), ((6877, 6931), 'numpy.zeros', 'np.zeros', (['(maxImageSize, maxImageSize)'], {'dtype': 'np.uint8'}), '((maxImageSize, maxImageSize), 
dtype=np.uint8)\n', (6885, 6931), True, 'import numpy as np\n'), ((7230, 7251), 'shapely.geometry.Polygon', 'geometry.Polygon', (['pts'], {}), '(pts)\n', (7246, 7251), True, 'import shapely.geometry as geometry\n'), ((7300, 7334), 'osmmodelconfig.featureNames.keys', 'osmmodelconfig.featureNames.keys', ([], {}), '()\n', (7332, 7334), False, 'import osmmodelconfig\n'), ((7986, 8020), 'osmmodelconfig.featureNames.keys', 'osmmodelconfig.featureNames.keys', ([], {}), '()\n', (8018, 8020), False, 'import osmmodelconfig\n'), ((8359, 8373), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (8366, 8373), True, 'import numpy as np\n'), ((2094, 2115), 'numpy.copy', 'np.copy', (['imageNoMasks'], {}), '(imageNoMasks)\n', (2101, 2115), True, 'import numpy as np\n'), ((2124, 2179), 'cv2.drawContours', 'cv2.drawContours', (['image', 'fitContour', '(-1)', '(0, 255, 0)', '(2)'], {}), '(image, fitContour, -1, (0, 255, 0), 2)\n', (2140, 2179), False, 'import cv2\n'), ((2618, 2715), 'skimage.io.imsave', 'io.imsave', (['debugFileName', 'image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth]'], {'quality': '(100)'}), '(debugFileName, image[bbTop:bbTop + bbHeight, bbLeft:bbLeft +\n bbWidth], quality=100)\n', (2627, 2715), False, 'from skimage import io\n'), ((2926, 2947), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2934, 2947), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2971), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (2965, 2971), True, 'import matplotlib.pyplot as plt\n'), ((3416, 3455), 'os.path.join', 'os.path.join', (['fullTrainingDir', 'imageDir'], {}), '(fullTrainingDir, imageDir)\n', (3428, 3455), False, 'import os\n'), ((4647, 4698), 'os.path.join', 'os.path.join', (['osmcfg.rootOsmDir', 'classDir', 'fileName'], {}), '(osmcfg.rootOsmDir, classDir, fileName)\n', (4659, 4698), False, 'import os\n'), ((8641, 8662), 'numpy.copy', 'np.copy', (['imageNoMasks'], {}), '(imageNoMasks)\n', (8648, 8662), 
True, 'import numpy as np\n'), ((845, 946), 'QuadKey.quadkey.TileSystem.pixel_to_geo', 'quadkey.TileSystem.pixel_to_geo', (['(pt[0, 0] + tilePixel[0], pt[0, 1] + tilePixel[1])', 'qkRoot.level'], {}), '((pt[0, 0] + tilePixel[0], pt[0, 1] +\n tilePixel[1]), qkRoot.level)\n', (876, 946), True, 'import QuadKey.quadkey as quadkey\n'), ((2001, 2031), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', '(2)', '(True)'], {}), '(cnt, 2, True)\n', (2017, 2031), False, 'import cv2\n'), ((2324, 2390), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth]'], {}), '(image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth])\n', (2334, 2390), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3497), 'os.path.split', 'os.path.split', (['imageDir'], {}), '(imageDir)\n', (3487, 3497), False, 'import os\n'), ((4767, 4802), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (4777, 4802), False, 'import csv\n'), ((8252, 8312), 'skimage.draw.polygon_perimeter', 'draw.polygon_perimeter', (['xs', 'ys', '(maxImageSize, maxImageSize)'], {}), '(xs, ys, (maxImageSize, maxImageSize))\n', (8274, 8312), False, 'from skimage import draw\n'), ((8592, 8623), 'numpy.sum', 'np.sum', (['mask[:, -edgePixels:-1]'], {}), '(mask[:, -edgePixels:-1])\n', (8598, 8623), True, 'import numpy as np\n'), ((8784, 8818), 'osmmodelconfig.featureNames.keys', 'osmmodelconfig.featureNames.keys', ([], {}), '()\n', (8816, 8818), False, 'import osmmodelconfig\n'), ((4937, 4993), 'QuadKey.quadkey.TileSystem.geo_to_pixel', 'quadkey.TileSystem.geo_to_pixel', (['latLot', 'osmcfg.tileZoom'], {}), '(latLot, osmcfg.tileZoom)\n', (4968, 4993), True, 'import QuadKey.quadkey as quadkey\n'), ((5082, 5103), 'shapely.geometry.Polygon', 'geometry.Polygon', (['pts'], {}), '(pts)\n', (5098, 5103), True, 'import shapely.geometry as geometry\n'), ((8560, 8589), 'numpy.sum', 'np.sum', (['mask[:, 0:edgePixels]'], {}), '(mask[:, 0:edgePixels])\n', 
(8566, 8589), True, 'import numpy as np\n'), ((9954, 9988), 'osmmodelconfig.featureNames.keys', 'osmmodelconfig.featureNames.keys', ([], {}), '()\n', (9986, 9988), False, 'import osmmodelconfig\n'), ((10578, 10610), 'cv2.threshold', 'cv2.threshold', (['mask', '(127)', '(255)', '(0)'], {}), '(mask, 127, 255, 0)\n', (10591, 10610), False, 'import cv2\n'), ((10645, 10711), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (10661, 10711), False, 'import cv2\n'), ((10759, 10791), 'cv2.boundingRect', 'cv2.boundingRect', (['rawContours[0]'], {}), '(rawContours[0])\n', (10775, 10791), False, 'import cv2\n'), ((11202, 11223), 'numpy.copy', 'np.copy', (['imageNoMasks'], {}), '(imageNoMasks)\n', (11209, 11223), True, 'import numpy as np\n'), ((11240, 11296), 'cv2.drawContours', 'cv2.drawContours', (['image', 'rawContours', '(-1)', '(0, 255, 0)', '(2)'], {}), '(image, rawContours, -1, (0, 255, 0), 2)\n', (11256, 11296), False, 'import cv2\n'), ((11664, 11685), 'numpy.copy', 'np.copy', (['imageNoMasks'], {}), '(imageNoMasks)\n', (11671, 11685), True, 'import numpy as np\n'), ((11702, 11760), 'cv2.drawContours', 'cv2.drawContours', (['image', 'simpleContour', '(-1)', '(0, 255, 0)', '(2)'], {}), '(image, simpleContour, -1, (0, 255, 0), 2)\n', (11718, 11760), False, 'import cv2\n'), ((8494, 8523), 'numpy.sum', 'np.sum', (['mask[0:edgePixels, :]'], {}), '(mask[0:edgePixels, :])\n', (8500, 8523), True, 'import numpy as np\n'), ((8526, 8557), 'numpy.sum', 'np.sum', (['mask[-edgePixels:-1, :]'], {}), '(mask[-edgePixels:-1, :])\n', (8532, 8557), True, 'import numpy as np\n'), ((11474, 11540), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth]'], {}), '(image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth])\n', (11484, 11540), True, 'import matplotlib.pyplot as plt\n'), ((11587, 11617), 'cv2.approxPolyDP', 'cv2.approxPolyDP', 
(['cnt', '(5)', '(True)'], {}), '(cnt, 5, True)\n', (11603, 11617), False, 'import cv2\n'), ((11942, 12008), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth]'], {}), '(image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth])\n', (11952, 12008), True, 'import matplotlib.pyplot as plt\n'), ((12512, 12609), 'skimage.io.imsave', 'io.imsave', (['debugFileName', 'image[bbTop:bbTop + bbHeight, bbLeft:bbLeft + bbWidth]'], {'quality': '(100)'}), '(debugFileName, image[bbTop:bbTop + bbHeight, bbLeft:bbLeft +\n bbWidth], quality=100)\n', (12521, 12609), False, 'from skimage import io\n'), ((14564, 14595), 'numpy.mean', 'np.mean', (['rawContours[0]'], {'axis': '(0)'}), '(rawContours[0], axis=0)\n', (14571, 14595), True, 'import numpy as np\n'), ((17508, 17539), 'numpy.mean', 'np.mean', (['rawContours[0]'], {'axis': '(0)'}), '(rawContours[0], axis=0)\n', (17515, 17539), True, 'import numpy as np\n'), ((9467, 9517), 'skimage.draw.polygon', 'draw.polygon', (['xs', 'ys', '(maxImageSize, maxImageSize)'], {}), '(xs, ys, (maxImageSize, maxImageSize))\n', (9479, 9517), False, 'from skimage import draw\n'), ((9650, 9665), 'numpy.sum', 'np.sum', (['maskAnd'], {}), '(maskAnd)\n', (9656, 9665), True, 'import numpy as np\n'), ((13571, 13595), 'shapely.geometry.LineString', 'geometry.LineString', (['pts'], {}), '(pts)\n', (13590, 13595), True, 'import shapely.geometry as geometry\n'), ((13632, 13684), 'shapely.affinity.translate', 'affinity.translate', (['fitShape', '(-width / 2)', '(-width / 2)'], {}), '(fitShape, -width / 2, -width / 2)\n', (13650, 13684), True, 'import shapely.affinity as affinity\n'), ((13716, 13748), 'shapely.affinity.rotate', 'affinity.rotate', (['fitShape', 'angle'], {}), '(fitShape, angle)\n', (13731, 13748), True, 'import shapely.affinity as affinity\n'), ((13784, 13830), 'shapely.affinity.translate', 'affinity.translate', (['fitShape', 'centerX', 'centerY'], {}), '(fitShape, centerX, centerY)\n', (13802, 13830), 
True, 'import shapely.affinity as affinity\n'), ((15006, 15048), 'numpy.array', 'np.array', (['[centerX, centerY, width, angle]'], {}), '([centerX, centerY, width, angle])\n', (15014, 15048), True, 'import numpy as np\n'), ((16833, 16857), 'shapely.geometry.LineString', 'geometry.LineString', (['pts'], {}), '(pts)\n', (16852, 16857), True, 'import shapely.geometry as geometry\n'), ((16894, 16944), 'shapely.affinity.rotate', 'affinity.rotate', (['fitShape', 'angle'], {'use_radians': '(True)'}), '(fitShape, angle, use_radians=True)\n', (16909, 16944), True, 'import shapely.affinity as affinity\n'), ((16980, 17026), 'shapely.affinity.translate', 'affinity.translate', (['fitShape', 'centerX', 'centerY'], {}), '(fitShape, centerX, centerY)\n', (16998, 17026), True, 'import shapely.affinity as affinity\n'), ((17970, 18020), 'numpy.array', 'np.array', (['[centerX, centerY, width, height, angle]'], {}), '([centerX, centerY, width, height, angle])\n', (17978, 18020), True, 'import numpy as np\n'), ((14877, 14908), 'cv2.contourArea', 'cv2.contourArea', (['rawContours[0]'], {}), '(rawContours[0])\n', (14892, 14908), False, 'import cv2\n'), ((17796, 17827), 'cv2.contourArea', 'cv2.contourArea', (['rawContours[0]'], {}), '(rawContours[0])\n', (17811, 17827), False, 'import cv2\n'), ((13290, 13301), 'math.cos', 'math.cos', (['r'], {}), '(r)\n', (13298, 13301), False, 'import math\n'), ((13340, 13351), 'math.sin', 'math.sin', (['r'], {}), '(r)\n', (13348, 13351), False, 'import math\n'), ((14167, 14188), 'shapely.geometry.Point', 'geometry.Point', (['pt[0]'], {}), '(pt[0])\n', (14181, 14188), True, 'import shapely.geometry as geometry\n'), ((15961, 16001), 'math.fabs', 'math.fabs', (['(result.x[3] - bestResult.x[3])'], {}), '(result.x[3] - bestResult.x[3])\n', (15970, 16001), False, 'import math\n'), ((17325, 17346), 'shapely.geometry.Point', 'geometry.Point', (['pt[0]'], {}), '(pt[0])\n', (17339, 17346), True, 'import shapely.geometry as geometry\n'), ((14452, 14464), 
'math.fabs', 'math.fabs', (['d'], {}), '(d)\n', (14461, 14464), False, 'import math\n')] |
from tensorboardX import SummaryWriter
import os, glob, csv, random, time
import datetime, time, json, shutil
from tqdm import tqdm
import numpy as np
import PIL, cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import matplotlib
from matplotlib import pyplot as plt
from model import Model
import utils
from data import Data_load, data_generator
def torch_seed(seed):
    """Seed every random number generator in use for reproducible runs.

    Seeds torch (CPU and all CUDA devices), numpy and Python's ``random``,
    and forces cuDNN into deterministic, non-benchmarking mode.

    Arguments:
        seed (int): seed applied to all random number generators.
    """
    # BUGFIX: torch.manual_seed(seed) was previously called twice; once is enough.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)  # Numpy module.
    random.seed(seed)  # Python random module.
    # disable cuDNN autotuning and request deterministic kernels
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
class Trainer():
    """Training / evaluation harness for a driving-gaze saliency model.

    Wraps model construction, optimization, metric bookkeeping,
    TensorBoard logging, best-checkpoint saving with early stopping,
    and snapshotting of the source code for one gaze dataset.
    """
    def __init__(self, dataset, epochs=10, lr=0.01, momentum=0.8, bz=20,
                 weight_decay=1e-4, lr_gamma=0.8, best_acc=None,
                 best_weight='', copy_code_flag=True, data_instance=None):
        # dataset: name of the dataset this trainer is bound to
        # best_weight: path where the best checkpoint is saved / loaded from
        # data_instance: Data_load object; supplies the shuffled "other map"
        #   used by the shuffled-AUC metric
        # best_acc is currently unused -- kept for interface compatibility
        self.dataset=dataset
        self.bz = bz
        self.lr = lr
        self.lr_gamma = lr_gamma
        self.epochs = epochs
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.best_weight = best_weight
        self.copy_code_flag = copy_code_flag
        self.net = Model().cuda()
        # metrics tracked every epoch (see metric_compute for definitions)
        self.metric_list = ['kld', 'cc','sim', 'auc_j', 'nss']
        # run directory stamped with dataset name and launch time
        self.outpath = './runs/'+'%s_'%dataset+str(datetime.datetime.now())[:-7].replace(' ', '_').replace(':','')
        # self.best_weight = self.outpath+'/best_weight_%s.pt'%dataset
        self.writer = SummaryWriter(self.outpath)
        self.optimizer = self._optimizer()
        # multiply lr by lr_gamma after every epoch
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                            gamma = self.lr_gamma, step_size=1)
        self.criteon = nn.BCELoss().cuda()
        self.history_loss = {"train":[],"valid":[], "test":[]}
        self.history_metric={"train":{},"valid":{}, "test":{}}
        self.bestepoch = []
        self.test_predictions = []
        self.test_labels = []
        self.other_map = data_instance.other_map(dataset)
        if self.copy_code_flag:
            self.copy_code()
        # one empty history list per (phase, metric) pair
        for key in self.history_metric:
            for metric_name in self.metric_list:
                self.history_metric[key][metric_name]=[]
    def _optimizer(self):
        """Build the Adam optimizer over all network parameters."""
        optimizer = torch.optim.Adam(self.net.parameters(), lr = self.lr,
                                weight_decay=self.weight_decay)
        return optimizer
    def fit(self, train_loader, val_loader, clip_=False):
        """Train for up to self.epochs epochs, validating after each one.

        clip_: when True, only a dataset-dependent fraction of the training
        batches is used per epoch (downsampling for the large datasets).

        NOTE(review): metric_compute() is declared with a required `fix`
        argument but is called below with only (output, target); as written
        this call raises a TypeError -- confirm the intended signature.
        NOTE(review): the inner loop variable `nn` shadows the `torch.nn`
        module alias inside this method.
        """
        if clip_: # downsampling for large datasets
            clip_dict={"EyeTrack":1, "DReye":4, "DADA2000":7, "BDDA":9}
            clip_num = clip_dict[self.dataset]
            clip_batch = int(len(train_loader)/clip_num)
        for epoch in range(self.epochs):
            self.net.train()
            losses = 0
            metrics = [0]*len(self.metric_list)
            # phar = tqdm(total=len(train_loader.dataset))
            timestamp = time.time()
            print('########## epoch {:0>3d}; \t lr: {:.8f} ##########'.format(epoch,
                self.optimizer.param_groups[0]["lr"]))
            for batch_idx, (input, target) in enumerate(train_loader):
                input = input.cuda()
                target = target.cuda()
                output = self.net(input)
                loss = self.criteon(output, target)
                metric = self.metric_compute(output, target)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                losses += loss
                for nn in range(len(self.metric_list)):
                    metrics[nn] += metric[nn].item()
                if batch_idx % 200 == 0:
                    # phar.update(len(img)*100)
                    print('\n Train Epoch: {}/{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, self.epochs, batch_idx * len(input), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item()),
                        '\t new_kld:%.3f, new_cc:%.3f, new_sim:%.3f'%(metric[0].item(),
                        metric[1].item(), metric[2].item()),
                        '\t Cost time:{} s'.format(int(time.time()-timestamp)))
                    timestamp = time.time()
                if clip_ and batch_idx>clip_batch:
                    break
            # NOTE(review): divides by the last batch index, not the batch
            # count (batch_idx+1) -- slight over-estimate of the mean loss
            train_loss = losses/batch_idx
            self.history_loss["train"].append(float(train_loss))
            for ii, metric_name in enumerate(self.metric_list):
                self.history_metric['train'][metric_name].append(metrics[ii]/batch_idx)
                print('train %s: %.3f \t'%( metric_name, metrics[ii]/batch_idx), end='')
            print()
            print('Train average loss:', train_loss)
            self.writer.add_scalar('train_loss', train_loss, epoch)
            # self.writer.add_scalar('new_kld', metrics[0]/batch_idx, epoch)
            # self.writer.add_scalar('cc', metrics[1]/batch_idx, epoch)
            # self.writer.add_scalar('new_cc', metrics[2]/batch_idx, epoch)
            # phar.close()
            # evaluate() returns True when early stopping should trigger
            break_flag = self.evaluate('valid', val_loader, epoch)
            self.scheduler.step()
            if break_flag:
                break
    def evaluate(self, phase, db_loader, epoch=None):
        """Run one pass over db_loader in the given phase.

        phase: 'valid' (during training; returns the early-stop flag),
        'test' (load best weights, record metrics to JSON) or 'infer'
        (additionally time the forward pass per batch).

        NOTE(review): this method reads the module-level global `dataset`
        (not self.dataset) when printing and naming the metric file, so the
        caller's loop variable name is load-bearing.
        """
        if phase == 'test' or phase=='infer':
            self.net.load_state_dict(torch.load(self.best_weight))
            print('Load the best weight of trained on %s:'%self.dataset,
                  'predict for %s'%dataset)
            if phase=='infer':
                time_list=[]
        self.net.eval()
        with torch.no_grad():
            losses = 0
            metrics = [0]*len(self.metric_list)
            for batch_idx, data_batch in enumerate(db_loader):
                # each batch: video frames, saliency maps, fixation maps
                (frame_bh, map_bh, fix_bh) = data_batch
                frame_bh = frame_bh.cuda()
                map_bh = map_bh.cuda()
                t0=time.time()
                logits = self.net(frame_bh)
                if phase=='infer':
                    time_list.append(time.time()-t0)
                losses += self.criteon(logits, map_bh).item()
                if batch_idx>100 and phase=='infer':
                    print('cost time: %.6fms'%(np.mean(time_list)*1000))
                metric = self.metric_compute(logits, map_bh, fix_bh)
                # NOTE(review): loop variable `nn` shadows the torch.nn alias
                for nn in range(len(self.metric_list)):
                    metrics[nn] += metric[nn].item()
            for ii, metric_name in enumerate(self.metric_list):
                if phase=="valid":
                    self.history_metric[phase][metric_name].append(metrics[ii]/(batch_idx+1))
                else:
                    self.history_metric[phase][metric_name]=metrics[ii]/(batch_idx+1)
                print(phase, '%s: %.3f \t'%(metric_name, metrics[ii]/(batch_idx+1)), end='')
            print()
            losses /= (batch_idx+1)
            print(phase+'_set: Average loss: {:.4f}\n'.format(
                losses))
        if phase == 'valid':
            break_flag=False
            self.writer.add_scalar(phase+'_loss', losses, epoch)
            self.history_loss["valid"].append(float(losses))
            best_loss = min(self.history_loss["valid"])
            if losses == best_loss:
                # new best validation loss: remember the epoch, save weights
                self.bestepoch = int(epoch)
                # self.best_weight = self.outpath+'/best_weight_%s_epoch%d.pt'%(self.dataset, epoch)
                torch.save(self.net.state_dict(), self.best_weight)
                print('write the best loss {:.2f} weight file in epoch {}'.format(losses, epoch))
            # stop if no improvement for more than 3 epochs
            if epoch-self.bestepoch>3:
                print('early stopping')
                break_flag=True
            return break_flag
        elif phase == "test":
            self.history_loss["test"] = float(losses)
            metric_file = self.outpath+'/Train_%s_for_%s_metric_history.json'%(self.dataset, dataset)
            if self.dataset==dataset:
                loss_file = self.outpath+'/%s_loss_history.json'%self.dataset
                utils.write_json(self.history_loss, loss_file)
                utils.write_json(self.history_metric, metric_file)
            else:
                # only write metrics in test phase for predicted datasets
                utils.write_json(self.history_metric['test'], metric_file)
    def metric_compute(self, pred, sal, fix):
        """Compute the mean of each metric in self.metric_list for one batch.

        pred: predicted saliency, sal: ground-truth saliency map,
        fix: ground-truth fixation map (used by nss / AUC metrics).
        Returns a 1-D tensor with one mean value per metric.
        """
        # the mean of metrics in every batch
        num_ =len(self.metric_list)
        fix_np = fix.detach().cpu().numpy()
        # quantize the prediction to uint8 then resize every map to 1280x720
        pred_r = (pred/pred.max()*255)
        pred_r = np.array(pred_r.detach().cpu().numpy(), dtype=np.uint8)
        pred_np=[]
        for ii in range(pred_r.shape[0]):
            pred_s = cv2.resize(np.squeeze(pred_r[ii]), (1280, 720))
            pred_np.append(pred_s.astype('float32')/255.)
        pred_np = np.stack(pred_np)
        pred_resize = torch.from_numpy(pred_np)
        # same quantize-and-resize treatment for the ground-truth maps
        sal_r = (sal/sal.max()*255)
        sal_r = np.array(sal_r.detach().cpu().numpy(), dtype=np.uint8)
        sal_np=[]
        for ii in range(pred_r.shape[0]):
            sal_s = cv2.resize(np.squeeze(sal_r[ii]), (1280, 720))
            sal_np.append(sal_s.astype('float32')/255.)
        sal_np = np.stack(sal_np)
        metrics = torch.zeros(num_, dtype=float)
        for ii in range(num_):
            metric_name = self.metric_list[ii]
            if metric_name=='kld':
                metrics[ii] = utils.new_kld(pred, sal).mean()
            elif metric_name=='cc':
                metrics[ii] = utils.new_cc(pred, sal).mean()
            elif metric_name=='sim':
                metrics[ii] = utils.new_sim(pred, sal).mean()
            elif metric_name=='nss':
                metrics[ii] = utils.nss(pred_resize, fix.type(torch.bool)).mean()
            elif metric_name=='auc_s':
                # (review) this shuffled-AUC runs low; cause unknown -- the value
                # depends on how other_map is sampled, but is basically at most
                # ~30. DADA / DReye never released their metric code.
                # metrics[ii] = utils.auc_shuff_acl(pred_np, fix_np, self.other_map)
                auc_s=[]
                for jj in range(pred_r.shape[0]):
                    auc = utils.auc_shuff_acl(pred_np[jj], fix_np[jj], self.other_map)
                    auc_s.append(auc)
                metrics[ii] = np.mean(auc_s)
            elif metric_name=='ig':
                metrics[ii] = utils.information_gain(fix_np, pred_np, sal_np)
            elif metric_name=='auc_j':
                auc_j=[]
                for jj in range(pred_r.shape[0]):
                    auc = utils.auc_judd(pred_np[jj], fix_np[jj])
                    auc_j.append(auc)
                metrics[ii] = np.mean(auc_j)
        return metrics
    def copy_code(self):
        """Snapshot the experiment's source files into the run directory."""
        code_list = ["run.py","model.py", "utils.py", "data.py",'inference.py']
        code_path = self.outpath+"/copy_code/"
        if not os.path.exists(code_path):
            os.mkdir(code_path)
        for code in code_list:
            shutil.copy2('./'+code, code_path+code)
        print('copy the codes:', code_list, 'into the:', code_path)
if __name__=="__main__":
dataset_list=["EyeTrack", "DReye", "DADA2000", "BDDA"]
num_workers= 0
bz = 8
torch_seed(2017)
Data_lister=Data_load()
# frame_list, map_list = Data_lister.load_list('EyeTrack','train')
# """
for dataset in ["EyeTrack"]:
train_loader= data_generator(dataset, 'train', Data_lister, bz, num_workers)
val_loader= data_generator(dataset, 'valid', Data_lister, bz, num_workers)
# train on specific datasets
Trainer_ = Trainer(dataset=dataset,
epochs=20,
lr=1e-3,
lr_gamma=0.9,
momentum=0.9,
weight_decay=1e-4,
bz=bz,
data_instance = Data_lister,
best_weight='./runs/EyeTrack_2021-12-07_200847/best_weight_EyeTrack.pt'
)
# Trainer_.fit(train_loader, val_loader, clip_=True)
# predict in valid sets of all datasets
for dataset in ["EyeTrack"]:
test_loader= data_generator(dataset, 'test', Data_lister, bz, num_workers)
Trainer_.evaluate('test', test_loader)
# """
# num_workers= 0
# bz = 1
# # test inference time
# for dataset in dataset_list:
# Trainer_ = Trainer(dataset=dataset,
# best_weight='./runs/EyeTrack_2021-11-15_171929/best_weight_EyeTrack.pt',
# bz=bz,
# copy_code_flag=False
# )
# # predict in valid sets of all datasets
# for dataset in dataset_list:
# test_loader= data_generator(dataset, 'test', Data_lister, bz, num_workers)
# Trainer_.evaluate('infer', test_loader) | [
"model.Model",
"torch.from_numpy",
"os.path.exists",
"numpy.mean",
"tensorboardX.SummaryWriter",
"shutil.copy2",
"utils.new_sim",
"numpy.stack",
"utils.new_cc",
"numpy.random.seed",
"os.mkdir",
"data.data_generator",
"utils.new_kld",
"numpy.squeeze",
"utils.information_gain",
"data.Dat... | [((483, 506), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (500, 506), False, 'import torch\n'), ((512, 540), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (534, 540), False, 'import torch\n'), ((546, 578), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (572, 578), False, 'import torch\n'), ((586, 606), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (600, 606), True, 'import numpy as np\n'), ((629, 646), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (640, 646), False, 'import os, glob, csv, random, time\n'), ((677, 700), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (694, 700), False, 'import torch\n'), ((11636, 11647), 'data.Data_load', 'Data_load', ([], {}), '()\n', (11645, 11647), False, 'from data import Data_load, data_generator\n'), ((1645, 1672), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['self.outpath'], {}), '(self.outpath)\n', (1658, 1672), False, 'from tensorboardX import SummaryWriter\n'), ((1745, 1830), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['self.optimizer'], {'gamma': 'self.lr_gamma', 'step_size': '(1)'}), '(self.optimizer, gamma=self.lr_gamma,\n step_size=1)\n', (1776, 1830), False, 'import torch\n'), ((9255, 9272), 'numpy.stack', 'np.stack', (['pred_np'], {}), '(pred_np)\n', (9263, 9272), True, 'import numpy as np\n'), ((9296, 9321), 'torch.from_numpy', 'torch.from_numpy', (['pred_np'], {}), '(pred_np)\n', (9312, 9321), False, 'import torch\n'), ((9639, 9655), 'numpy.stack', 'np.stack', (['sal_np'], {}), '(sal_np)\n', (9647, 9655), True, 'import numpy as np\n'), ((9677, 9707), 'torch.zeros', 'torch.zeros', (['num_'], {'dtype': 'float'}), '(num_, dtype=float)\n', (9688, 9707), False, 'import torch\n'), ((11806, 11868), 'data.data_generator', 'data_generator', (['dataset', '"""train"""', 'Data_lister', 'bz', 'num_workers'], {}), "(dataset, 
'train', Data_lister, bz, num_workers)\n", (11820, 11868), False, 'from data import Data_load, data_generator\n'), ((11894, 11956), 'data.data_generator', 'data_generator', (['dataset', '"""valid"""', 'Data_lister', 'bz', 'num_workers'], {}), "(dataset, 'valid', Data_lister, bz, num_workers)\n", (11908, 11956), False, 'from data import Data_load, data_generator\n'), ((3133, 3144), 'time.time', 'time.time', ([], {}), '()\n', (3142, 3144), False, 'import datetime, time, json, shutil\n'), ((5937, 5952), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5950, 5952), False, 'import torch\n'), ((11239, 11264), 'os.path.exists', 'os.path.exists', (['code_path'], {}), '(code_path)\n', (11253, 11264), False, 'import os, glob, csv, random, time\n'), ((11279, 11298), 'os.mkdir', 'os.mkdir', (['code_path'], {}), '(code_path)\n', (11287, 11298), False, 'import os, glob, csv, random, time\n'), ((11344, 11387), 'shutil.copy2', 'shutil.copy2', (["('./' + code)", '(code_path + code)'], {}), "('./' + code, code_path + code)\n", (11356, 11387), False, 'import datetime, time, json, shutil\n'), ((12718, 12779), 'data.data_generator', 'data_generator', (['dataset', '"""test"""', 'Data_lister', 'bz', 'num_workers'], {}), "(dataset, 'test', Data_lister, bz, num_workers)\n", (12732, 12779), False, 'from data import Data_load, data_generator\n'), ((1355, 1362), 'model.Model', 'Model', ([], {}), '()\n', (1360, 1362), False, 'from model import Model\n'), ((1878, 1890), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1888, 1890), True, 'import torch.nn as nn\n'), ((5686, 5714), 'torch.load', 'torch.load', (['self.best_weight'], {}), '(self.best_weight)\n', (5696, 5714), False, 'import torch\n'), ((6252, 6263), 'time.time', 'time.time', ([], {}), '()\n', (6261, 6263), False, 'import datetime, time, json, shutil\n'), ((9139, 9161), 'numpy.squeeze', 'np.squeeze', (['pred_r[ii]'], {}), '(pred_r[ii])\n', (9149, 9161), True, 'import numpy as np\n'), ((9527, 9548), 'numpy.squeeze', 
'np.squeeze', (['sal_r[ii]'], {}), '(sal_r[ii])\n', (9537, 9548), True, 'import numpy as np\n'), ((4508, 4519), 'time.time', 'time.time', ([], {}), '()\n', (4517, 4519), False, 'import datetime, time, json, shutil\n'), ((8446, 8492), 'utils.write_json', 'utils.write_json', (['self.history_loss', 'loss_file'], {}), '(self.history_loss, loss_file)\n', (8462, 8492), False, 'import utils\n'), ((8510, 8560), 'utils.write_json', 'utils.write_json', (['self.history_metric', 'metric_file'], {}), '(self.history_metric, metric_file)\n', (8526, 8560), False, 'import utils\n'), ((8672, 8730), 'utils.write_json', 'utils.write_json', (["self.history_metric['test']", 'metric_file'], {}), "(self.history_metric['test'], metric_file)\n", (8688, 8730), False, 'import utils\n'), ((9855, 9879), 'utils.new_kld', 'utils.new_kld', (['pred', 'sal'], {}), '(pred, sal)\n', (9868, 9879), False, 'import utils\n'), ((6385, 6396), 'time.time', 'time.time', ([], {}), '()\n', (6394, 6396), False, 'import datetime, time, json, shutil\n'), ((9955, 9978), 'utils.new_cc', 'utils.new_cc', (['pred', 'sal'], {}), '(pred, sal)\n', (9967, 9978), False, 'import utils\n'), ((6590, 6608), 'numpy.mean', 'np.mean', (['time_list'], {}), '(time_list)\n', (6597, 6608), True, 'import numpy as np\n'), ((10055, 10079), 'utils.new_sim', 'utils.new_sim', (['pred', 'sal'], {}), '(pred, sal)\n', (10068, 10079), False, 'import utils\n'), ((10642, 10656), 'numpy.mean', 'np.mean', (['auc_s'], {}), '(auc_s)\n', (10649, 10656), True, 'import numpy as np\n'), ((1486, 1509), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1507, 1509), False, 'import datetime, time, json, shutil\n'), ((4450, 4461), 'time.time', 'time.time', ([], {}), '()\n', (4459, 4461), False, 'import datetime, time, json, shutil\n'), ((10511, 10571), 'utils.auc_shuff_acl', 'utils.auc_shuff_acl', (['pred_np[jj]', 'fix_np[jj]', 'self.other_map'], {}), '(pred_np[jj], fix_np[jj], self.other_map)\n', (10530, 10571), False, 'import utils\n'), 
((10725, 10772), 'utils.information_gain', 'utils.information_gain', (['fix_np', 'pred_np', 'sal_np'], {}), '(fix_np, pred_np, sal_np)\n', (10747, 10772), False, 'import utils\n'), ((11027, 11041), 'numpy.mean', 'np.mean', (['auc_j'], {}), '(auc_j)\n', (11034, 11041), True, 'import numpy as np\n'), ((10917, 10956), 'utils.auc_judd', 'utils.auc_judd', (['pred_np[jj]', 'fix_np[jj]'], {}), '(pred_np[jj], fix_np[jj])\n', (10931, 10956), False, 'import utils\n')] |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
class TensorStandardScaler:
    """Helper class for automatically normalizing inputs into the network.

    The mean and standard deviation are stored as non-trainable TF variables
    so that they are saved and restored together with the model parameters.
    """

    def __init__(self, x_dim, suffix):
        """Initializes a scaler.

        Arguments:
            x_dim (int): The dimensionality of the inputs into the scaler.
            suffix (str): Suffix appended to the variable names so several
                scalers can coexist within the same graph.

        Returns: None.
        """
        self.fitted = False
        with tf.variable_scope("Scaler"):
            self.mu = tf.get_variable(
                name="scaler_mu" + suffix, shape=[1, x_dim], initializer=tf.constant_initializer(0.0),
                trainable=False
            )
            self.sigma = tf.get_variable(
                name="scaler_std" + suffix, shape=[1, x_dim], initializer=tf.constant_initializer(1.0),
                trainable=False
            )

        # BUGFIX: cached_mu previously had shape [0, x_dim], which could never
        # be load()-ed back into the [1, x_dim] variable if load_cache() was
        # called before fit()/cache(). Initialize the cache consistently with
        # the variables' initial values (zero mean, unit std).
        self.cached_mu, self.cached_sigma = np.zeros([1, x_dim]), np.ones([1, x_dim])

    def fit(self, data):
        """Runs two ops, one for assigning the mean of the data to the internal mean, and
        another for assigning the standard deviation of the data to the internal standard deviation.
        This function must be called within a 'with <session>.as_default()' block.

        Arguments:
            data (np.ndarray): A numpy array containing the input

        Returns: None.
        """
        mu = np.mean(data, axis=0, keepdims=True)
        sigma = np.std(data, axis=0, keepdims=True)
        # guard against (near-)constant features to avoid division by ~0
        sigma[sigma < 1e-12] = 1.0

        self.mu.load(mu)
        self.sigma.load(sigma)
        self.fitted = True
        self.cache()

    def transform(self, data):
        """Transforms the input matrix data using the parameters of this scaler.

        Arguments:
            data (np.array): A numpy array containing the points to be transformed.

        Returns: (np.array) The transformed dataset.
        """
        return (data - self.mu) / self.sigma

    def inverse_transform(self, data):
        """Undoes the transformation performed by this scaler.

        Arguments:
            data (np.array): A numpy array containing the points to be transformed.

        Returns: (np.array) The transformed dataset.
        """
        return self.sigma * data + self.mu

    def get_vars(self):
        """Returns a list of variables managed by this object.

        Returns: (list<tf.Variable>) The list of variables.
        """
        return [self.mu, self.sigma]

    def cache(self):
        """Caches current values of this scaler.

        Returns: None.
        """
        self.cached_mu = self.mu.eval()
        self.cached_sigma = self.sigma.eval()

    def load_cache(self):
        """Loads values from the cache

        Returns: None.
        """
        self.mu.load(self.cached_mu)
        self.sigma.load(self.cached_sigma)
| [
"numpy.mean",
"numpy.ones",
"tensorflow.variable_scope",
"numpy.zeros",
"tensorflow.constant_initializer",
"numpy.std"
] | [((1433, 1469), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (1440, 1469), True, 'import numpy as np\n'), ((1486, 1521), 'numpy.std', 'np.std', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (1492, 1521), True, 'import numpy as np\n'), ((506, 533), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Scaler"""'], {}), "('Scaler')\n", (523, 533), True, 'import tensorflow as tf\n'), ((960, 980), 'numpy.zeros', 'np.zeros', (['[0, x_dim]'], {}), '([0, x_dim])\n', (968, 980), True, 'import numpy as np\n'), ((982, 1001), 'numpy.ones', 'np.ones', (['[1, x_dim]'], {}), '([1, x_dim])\n', (989, 1001), True, 'import numpy as np\n'), ((647, 675), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (670, 675), True, 'import tensorflow as tf\n'), ((839, 867), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (862, 867), True, 'import tensorflow as tf\n')] |
from __future__ import print_function
import numpy as np
import math
from scipy.misc import logsumexp
import torch
import torch.utils.data
import torch.nn as nn
from torch.nn import Linear
from torch.autograd import Variable
from ..utils.distributions import log_Bernoulli, log_Normal_diag, log_Normal_standard, log_Logistic_256
from ..utils.visual_evaluation import plot_histogram
from ..utils.nn import he_init, GatedDense, NonLinear
from .Model import Model
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#=======================================================================================================================
class VAE(Model):
    """Single-latent-layer VAE with a choice of standard or VampPrior prior.

    Encoder q(z|x) and decoder p(x|z) are gated MLPs; the output head depends
    on args.input_type ('binary' vs 'gray'/'continuous').
    """
    def __init__(self, args):
        super(VAE, self).__init__(args)
        # encoder: q(z | x)
        self.q_z_layers = nn.Sequential(
            GatedDense(np.prod(self.args.input_size), 300),
            GatedDense(300, 300)
        )
        self.q_z_mean = Linear(300, self.args.z1_size)
        # log-variance clamped to [-6, 2] via Hardtanh for numerical stability
        self.q_z_logvar = NonLinear(300, self.args.z1_size, activation=nn.Hardtanh(min_val=-6., max_val=2.))
        # decoder: p(x | z)
        self.p_x_layers = nn.Sequential(
            GatedDense(self.args.z1_size, 300),
            GatedDense(300, 300)
        )
        if self.args.input_type == 'binary':
            self.p_x_mean = NonLinear(300, np.prod(self.args.input_size), activation=nn.Sigmoid())
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            self.p_x_mean = NonLinear(300, np.prod(self.args.input_size), activation=nn.Sigmoid())
            self.p_x_logvar = NonLinear(300, np.prod(self.args.input_size),
                                         activation=nn.Hardtanh(min_val=-4.5, max_val=0))
        # weights initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                he_init(m)
        # add pseudo-inputs if VampPrior
        if self.args.prior == 'vampprior':
            self.add_pseudoinputs()

    # AUXILIARY METHODS
    def calculate_loss(self, x, beta=1., average=False):
        '''
        Negative ELBO: -RE + beta * KL (beta enables KL warm-up).

        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

        # RE (reconstruction log-likelihood, likelihood depends on input type)
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL estimated from the single posterior sample z_q
        log_p_z = self.log_p_z(z_q)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = -(log_p_z - log_q_z)

        loss = - RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL

    def calculate_likelihood(self, X, dir, mode='test', S=5000, MB=100):
        """Importance-sampling estimate of log p(x) per data point.

        S samples per point, processed in chunks of MB; also writes a
        histogram of the estimates to `dir`.
        NOTE(review): `logsumexp` comes from `scipy.misc`, removed in
        SciPy 1.0 -- modern SciPy provides `scipy.special.logsumexp`.
        NOTE(review): parameter name `dir` shadows the builtin.
        """
        # set auxiliary variables for number of training and test sets
        N_test = X.size(0)

        # init list
        likelihood_test = []

        if S <= MB:
            R = 1
        else:
            R = S / MB
            S = MB

        for j in range(N_test):
            if j % 100 == 0:
                print('{:.2f}%'.format(j / (1. * N_test) * 100))
            # Take x*
            x_single = X[j].unsqueeze(0)

            a = []
            for r in range(0, int(R)):
                # Repeat it for all training points
                x = x_single.expand(S, x_single.size(1))

                a_tmp, _, _ = self.calculate_loss(x)
                a.append(-a_tmp.cpu().data.numpy())

            # calculate max
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append(likelihood_x - np.log(len(a)))

        likelihood_test = np.array(likelihood_test)

        plot_histogram(-likelihood_test, dir, mode)

        return -np.mean(likelihood_test)

    def calculate_lower_bound(self, X_full, MB=100):
        """Average negative ELBO over X_full, evaluated in minibatches of MB.

        NOTE(review): `.cpu().data[0]` indexing of 0-dim tensors was removed
        in modern PyTorch; `.item()` is the contemporary equivalent --
        confirm against the torch version this project targets.
        """
        # CALCULATE LOWER BOUND:
        lower_bound = 0.
        RE_all = 0.
        KL_all = 0.

        I = int(math.ceil(X_full.size(0) / MB))

        for i in range(I):
            x = X_full[i * MB: (i + 1) * MB].view(-1, np.prod(self.args.input_size))

            loss, RE, KL = self.calculate_loss(x, average=True)

            RE_all += RE.cpu().data[0]
            KL_all += KL.cpu().data[0]
            lower_bound += loss.cpu().data[0]

        lower_bound /= I

        return lower_bound

    # ADDITIONAL METHODS
    def generate_x(self, N=25):
        """Sample N latents from the prior and decode them to image means."""
        if self.args.prior == 'standard':
            z_sample_rand = Variable(torch.FloatTensor(N, self.args.z1_size).normal_())
            if self.args.cuda:
                z_sample_rand = z_sample_rand.cuda()

        elif self.args.prior == 'vampprior':
            # decode pseudo-inputs through the encoder, then sample
            means = self.means(self.idle_input)[0:N]
            z_sample_gen_mean, z_sample_gen_logvar = self.q_z(means)
            z_sample_rand = self.reparameterize(z_sample_gen_mean, z_sample_gen_logvar)

        samples_rand, _ = self.p_x(z_sample_rand)
        return samples_rand

    def reconstruct_x(self, x):
        """Encode x and return the decoded mean reconstruction."""
        x_mean, _, _, _, _ = self.forward(x)
        return x_mean

    # THE MODEL: VARIATIONAL POSTERIOR
    def q_z(self, x):
        """Return (mean, logvar) of q(z | x)."""
        x = self.q_z_layers(x)

        z_q_mean = self.q_z_mean(x)
        z_q_logvar = self.q_z_logvar(x)
        return z_q_mean, z_q_logvar

    # THE MODEL: GENERATIVE DISTRIBUTION
    def p_x(self, z):
        """Return (mean, logvar) of p(x | z); logvar is 0. for binary inputs."""
        z = self.p_x_layers(z)

        x_mean = self.p_x_mean(z)
        if self.args.input_type == 'binary':
            x_logvar = 0.
        else:
            # keep the mean strictly inside (0, 1) for the logistic likelihood
            x_mean = torch.clamp(x_mean, min=0. + 1. / 512., max=1. - 1. / 512.)
            x_logvar = self.p_x_logvar(z)
        return x_mean, x_logvar

    # the prior
    def log_p_z(self, z):
        """Log-density of z under the chosen prior (standard N(0,I) or VampPrior mixture)."""
        if self.args.prior == 'standard':
            log_prior = log_Normal_standard(z, dim=1)

        elif self.args.prior == 'vampprior':
            # z - MB x M
            C = self.args.number_components

            # calculate params
            X = self.means(self.idle_input)

            # calculate params for given data
            z_p_mean, z_p_logvar = self.q_z(X)  # C x M

            # expand z
            z_expand = z.unsqueeze(1)
            means = z_p_mean.unsqueeze(0)
            logvars = z_p_logvar.unsqueeze(0)

            # uniform mixture over the C components: subtract log(C)
            a = log_Normal_diag(z_expand, means, logvars, dim=2) - math.log(C)  # MB x C
            a_max, _ = torch.max(a, 1)  # MB x 1
            # calculate log-sum-exp (stabilized by subtracting the row max)
            log_prior = a_max + torch.log(torch.sum(torch.exp(a - a_max.unsqueeze(1)), 1))  # MB x 1

        else:
            raise Exception('Wrong name of the prior!')

        return log_prior

    # THE MODEL: FORWARD PASS
    def forward(self, x):
        """Full pass: returns (x_mean, x_logvar, z_q, z_q_mean, z_q_logvar)."""
        # z ~ q(z | x)
        z_q_mean, z_q_logvar = self.q_z(x)
        z_q = self.reparameterize(z_q_mean, z_q_logvar)

        # x_mean = p(x|z)
        x_mean, x_logvar = self.p_x(z_q)

        return x_mean, x_logvar, z_q, z_q_mean, z_q_logvar
| [
"torch.nn.Hardtanh",
"numpy.mean",
"numpy.prod",
"torch.nn.Sigmoid",
"numpy.reshape",
"torch.mean",
"torch.max",
"numpy.asarray",
"math.log",
"numpy.array",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.clamp",
"scipy.misc.logsumexp"
] | [((1000, 1030), 'torch.nn.Linear', 'Linear', (['(300)', 'self.args.z1_size'], {}), '(300, self.args.z1_size)\n', (1006, 1030), False, 'from torch.nn import Linear\n'), ((4117, 4142), 'numpy.array', 'np.array', (['likelihood_test'], {}), '(likelihood_test)\n', (4125, 4142), True, 'import numpy as np\n'), ((2973, 2989), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (2983, 2989), False, 'import torch\n'), ((3007, 3021), 'torch.mean', 'torch.mean', (['RE'], {}), '(RE)\n', (3017, 3021), False, 'import torch\n'), ((3039, 3053), 'torch.mean', 'torch.mean', (['KL'], {}), '(KL)\n', (3049, 3053), False, 'import torch\n'), ((3910, 3923), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (3920, 3923), True, 'import numpy as np\n'), ((3940, 3983), 'numpy.reshape', 'np.reshape', (['a', '(a.shape[0] * a.shape[1], 1)'], {}), '(a, (a.shape[0] * a.shape[1], 1))\n', (3950, 3983), True, 'import numpy as np\n'), ((4011, 4023), 'scipy.misc.logsumexp', 'logsumexp', (['a'], {}), '(a)\n', (4020, 4023), False, 'from scipy.misc import logsumexp\n'), ((4213, 4237), 'numpy.mean', 'np.mean', (['likelihood_test'], {}), '(likelihood_test)\n', (4220, 4237), True, 'import numpy as np\n'), ((5945, 6010), 'torch.clamp', 'torch.clamp', (['x_mean'], {'min': '(0.0 + 1.0 / 512.0)', 'max': '(1.0 - 1.0 / 512.0)'}), '(x_mean, min=0.0 + 1.0 / 512.0, max=1.0 - 1.0 / 512.0)\n', (5956, 6010), False, 'import torch\n'), ((895, 924), 'numpy.prod', 'np.prod', (['self.args.input_size'], {}), '(self.args.input_size)\n', (902, 924), True, 'import numpy as np\n'), ((1102, 1140), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-6.0)', 'max_val': '(2.0)'}), '(min_val=-6.0, max_val=2.0)\n', (1113, 1140), True, 'import torch.nn as nn\n'), ((1390, 1419), 'numpy.prod', 'np.prod', (['self.args.input_size'], {}), '(self.args.input_size)\n', (1397, 1419), True, 'import numpy as np\n'), ((4521, 4550), 'numpy.prod', 'np.prod', (['self.args.input_size'], {}), '(self.args.input_size)\n', (4528, 4550), True, 
'import numpy as np\n'), ((6775, 6790), 'torch.max', 'torch.max', (['a', '(1)'], {}), '(a, 1)\n', (6784, 6790), False, 'import torch\n'), ((1432, 1444), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1442, 1444), True, 'import torch.nn as nn\n'), ((1574, 1603), 'numpy.prod', 'np.prod', (['self.args.input_size'], {}), '(self.args.input_size)\n', (1581, 1603), True, 'import numpy as np\n'), ((1675, 1704), 'numpy.prod', 'np.prod', (['self.args.input_size'], {}), '(self.args.input_size)\n', (1682, 1704), True, 'import numpy as np\n'), ((6730, 6741), 'math.log', 'math.log', (['C'], {}), '(C)\n', (6738, 6741), False, 'import math\n'), ((1616, 1628), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1626, 1628), True, 'import torch.nn as nn\n'), ((1757, 1793), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-4.5)', 'max_val': '(0)'}), '(min_val=-4.5, max_val=0)\n', (1768, 1793), True, 'import torch.nn as nn\n'), ((4933, 4972), 'torch.FloatTensor', 'torch.FloatTensor', (['N', 'self.args.z1_size'], {}), '(N, self.args.z1_size)\n', (4950, 4972), False, 'import torch\n')] |
#!/usr/bin/python
import argparse
import copy
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import scipy as sp
import tensorflow as tf
from functools import reduce
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.preprocessing import MinMaxScaler
parser = argparse.ArgumentParser()
## input data arguments
parser.add_argument('--config', type=str)  # path to the JSON experiment config
args = parser.parse_args()

# load the experiment configuration (predictors, response, model/training params)
with open(args.config, 'r') as infile:
    config = json.load(infile)
print(config)

# unpack the config into module-level hyperparameters used below
model_name = config['model_name']
predictor_files_arr = config['predictors']
augment_n = config['augment']['n']
augment_stdev = config['augment']['stdev']
response_file = config['response']['fname']
response_column = config['response']['column']
response_lookback = config['response']['lookback']
dropout_pct = config['model_params']['dropout_pct']
n_layers = config['model_params']['n_layers']
nodes_per_layer=config['model_params']['nodes_per_layer']
model_type=config['model_params']['model_type']
epochs = config['training_params']['epochs']
# create the predictor dataset
def add_lookback_predictors(df, num_days, include_today=True):
    """Append lagged copies of every non-Date column as extra predictors.

    After sorting by 'Date', each output row holds the original columns at
    lags 0..num_days-1 (suffix "-<lag>"); when include_today is False, lags
    1..num_days are used instead. Rows without a full lookback window are
    dropped, so the result has df.shape[0] - num_days (resp. num_days + 1)
    rows, indexed from 0, with the corresponding 'Date' column re-attached.

    Arguments:
        df (pd.DataFrame): input frame with a sortable 'Date' column.
        num_days (int): number of lagged copies to generate.
        include_today (bool): whether lag 0 (the same day) is included.

    Returns:
        pd.DataFrame with the lagged predictor columns plus 'Date'.
    """
    if include_today:
        start = 0
    else:
        # shift the lag window by one day so "today" itself is excluded
        start = 1
        num_days = num_days + 1
    df = df.sort_values(by=['Date'])
    n = df.shape[0]
    # lag i -> rows [num_days-i, n-i): row k of every slice refers to the
    # same target date, seen i days back
    shifted_dfs = [(i, df.iloc[num_days - i:n - i].drop(columns='Date'))
                   for i in range(start, num_days)]
    for lag, frame in shifted_dfs:
        frame.columns = ["{}-{}".format(column, lag) for column in frame.columns]
        frame.index = range(n - num_days)
    # BUGFIX: removed a leftover debug print of len(shifted_dfs)
    df_new = pd.concat([frame for _, frame in shifted_dfs], axis=1)
    df_new['Date'] = df.iloc[num_days:]['Date'].values
    return df_new
# build one lagged frame per predictor file (f = (path, lookback_days))
pred = [add_lookback_predictors(pd.read_csv(f[0]), f[1]+1) for f in predictor_files_arr]
# remove junk columns if they exist
pred_df = reduce(lambda x, y: pd.merge(left=x, right=y, on='Date'), pred)
pred_cols = list(filter(lambda a: not re.match('Unnamed', a), pred_df.columns))
pred_df = pred_df[pred_cols]
pred_df = pred_df.reindex(sorted(pred_df.columns), axis=1)
print("Predictors data")
print(pred_df[['Date']+ list(filter(lambda c: 'keyword_word2vec_york_sum' in c, pred_df.columns))].head())

# add in response data
# load the data
response_df = pd.read_csv(response_file)[[response_column, 'Date']]
response_df.columns = ['response', 'Date']
# create response lookback if necessary
if response_lookback > 0:
    lookback_pred = add_lookback_predictors(response_df, response_lookback, include_today=False)
    response_df = pd.merge(right=response_df, left=lookback_pred, on='Date')
print(response_df.head())
# if classifier, change response variable (binarize on sign)
if model_type == 'classifier':
    response_df['response'] = response_df.apply(lambda x: 1 if x.response > 0 else 0, axis=1)
# create design matrix
design_mat = pd.merge(left=pred_df, right=response_df, on='Date')
print(design_mat.shape)
print(design_mat.head())

### Create training and test sets
# NOTE(review): the split indices 595/596/693 are hard-coded for a specific
# dataset length -- confirm they still match the merged design matrix.
design_test = design_mat.iloc[(596+response_lookback):693]
design_train = design_mat.iloc[response_lookback:595]
Xt = design_test.drop(columns=['Date', 'response']).to_numpy()
yt = design_test['response'].to_numpy() # vol for bonds
X = design_train.drop(columns=['Date', 'response']).to_numpy()
y = design_train['response'].to_numpy()

# fit the min-max scaler on the clean training data only, then augment with
# Gaussian-noise copies; noisy rows may fall slightly outside [0, 1] after
# scaling since the scaler never saw the noise
scale = MinMaxScaler().fit(X)
X_orig = copy.deepcopy(X)
y_orig = copy.deepcopy(y)
for n in range(augment_n):
    random_noise = np.random.normal(0, augment_stdev, X_orig.shape)
    X = np.vstack((X, X_orig + random_noise))
    y = np.hstack((y, y_orig))
X = scale.transform(X)
Xt = scale.transform(Xt)
### Define the model
def create_model(n_predictors, dropout_pct=0, n_layers=3, nodes_per_layer=32, model_type='classifier'):
    """Build a dense Keras model plus its best-weights checkpoint callback.

    Args:
        n_predictors: number of input features (first-layer input shape).
        dropout_pct: dropout rate applied to the input and after each hidden layer.
        n_layers: number of hidden Dense layers.
        nodes_per_layer: width of each hidden layer.
        model_type: 'classifier' (sigmoid + BCE) or anything else (linear + MSE).

    Returns:
        (model, checkpoint) — the compiled model and a ModelCheckpoint that
        saves improving weights under results/<model_name>/.

    NOTE(review): relies on the global `model_name` for the checkpoint path —
    defined elsewhere in this script; confirm it is set before calling.
    """
    model = tf.keras.models.Sequential()
    # Input-level dropout doubles as light feature-noise regularization.
    model.add(tf.keras.layers.Dropout(dropout_pct, input_shape=(n_predictors,)))
    # add densely connected layers
    for l in range(n_layers):
        model.add(tf.keras.layers.Dense(nodes_per_layer, activation='relu'))
        model.add(tf.keras.layers.Dropout(dropout_pct))
    # output layer
    if model_type == 'classifier':
        model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
        # compile the model with adam optimizer and binary cross entropy loss
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        # Filename pattern must match the reconstruction in the training script.
        filepath="results/" + model_name + "/weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
        checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    else:
        model.add(tf.keras.layers.Dense(1, activation='linear'))
        model.compile(loss='mean_squared_error')
        filepath="results/" + model_name + "/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
        checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    return(model, checkpoint)
# create the model
model, checkpoint = create_model(X.shape[1],
                                 dropout_pct=dropout_pct,
                                 n_layers=n_layers,
                                 nodes_per_layer=nodes_per_layer,
                                 model_type=model_type)
# 30% of the (augmented) training data is held out for validation.
history = model.fit(X, y, epochs=epochs, batch_size=128, verbose=1, callbacks=[checkpoint], validation_split=0.3)
# Pick the epoch with the best validation metric; +1 because Keras epochs
# in the checkpoint filename are 1-based.
if model_type == 'classifier':
    best_epoch = np.argmax(history.history['val_accuracy']) + 1
    best_score = np.max(history.history['val_accuracy'])
else:
    best_epoch = np.argmin(history.history['val_loss']) + 1
    best_score = np.min(history.history['val_loss'])
# Reconstruct the checkpoint filename written by ModelCheckpoint above.
best_model_path = "results/" + model_name + "/weights-improvement-{0:02d}-{1:.2f}.hdf5".format(best_epoch, best_score)
best_model, _ = create_model(X.shape[1],
                             dropout_pct=dropout_pct,
                             n_layers=n_layers,
                             nodes_per_layer=nodes_per_layer,
                             model_type=model_type)
best_model.load_weights(best_model_path)
figure, ax = plt.subplots(1, 2, figsize=(20,7))
if model_type == 'classifier':
    # Training plot
    ax[0].plot(range(len(history.history['accuracy'])), history.history['accuracy'], c='blue', label='Training Accuracy')
    ax[0].plot(range(len(history.history['val_accuracy'])), history.history['val_accuracy'], c='red', label='Validation Accuracy')
    ax[0].set_ylabel('Accuracy')
    ax[0].set_xlabel('Epoch')
    ax[0].legend()
    training_acc = history.history['accuracy'][best_epoch - 1]
    validation_acc = history.history['val_accuracy'][best_epoch - 1]
    ax[0].set_title('Training and validation performance\n Training Accuracy: {}, Validation Accuracy: {}'.format(training_acc, validation_acc))
    # AUC score
    pred_y = best_model.predict(X_orig, verbose=0)
    pred_yt = best_model.predict(Xt, verbose=0)
    y_pred_keras = best_model.predict(Xt).ravel()
    fpr_keras, tpr_keras, thresholds_keras = roc_curve(yt, y_pred_keras)
    auc_keras = auc(fpr_keras, tpr_keras)
    print("Performance: AUC {}, training_accuracy {}, validation_accuracy {}".format(auc_keras, training_acc, validation_acc))
    ax[1].plot(fpr_keras, tpr_keras)
    ax[1].set_title('AUC: {}'.format(auc_keras))
    ax[1].set_ylabel('True Positive Rate')
    ax[1].set_xlabel('False Positive Rate')
else:
    # Training plot
    ax[0].plot(range(len(history.history['loss'])), history.history['loss'], c='blue', label='Training Loss')
    ax[0].plot(range(len(history.history['val_loss'])), history.history['val_loss'], c='red', label='Validation Loss')
    ax[0].set_ylabel('Mean Squared Error Loss')
    ax[0].set_xlabel('Epoch')
    ax[0].legend()
    training_loss = history.history['loss'][best_epoch - 1]
    validation_loss = history.history['val_loss'][best_epoch - 1]
    test_loss = best_model.evaluate(Xt, yt, verbose=0)
    # Permutation baseline: evaluate against 100 shuffled copies of the test
    # labels to see whether the real test loss beats chance.
    rand_vals = []
    for x in range(100):
        rand_yt = np.array(copy.deepcopy(yt))
        np.random.shuffle(rand_yt)
        rand_vals.append(best_model.evaluate(Xt, rand_yt, verbose=0))
    sorted_vals = np.sort(rand_vals)
    # One-sample t-test of the shuffled losses against the real test loss.
    ttest_t, ttest_pval = sp.stats.ttest_1samp(sorted_vals, test_loss)
    print("Performance: tstat {}, pval {}, training_loss {}, validation_loss {}, test_loss {}, random5pc_loss {}, random_mean {}, random95pc_loss {}".format(ttest_t, ttest_pval, training_loss, validation_loss, test_loss, sorted_vals[5], np.mean(rand_vals), sorted_vals[95]))
    ax[0].set_title('Training and validation performance\n Training loss: {0:.6f}, Validation loss: {1:.6f}, Test loss {2:.6f}\nRandomized 5%: {3:.6f}, mean: {4:.6f}, 95%: {5:.6f}\nT-test Statistic: {6:.6f} and p-value: {7:.6f}'.format(training_loss, validation_loss, test_loss, sorted_vals[5], np.mean(rand_vals), sorted_vals[95], ttest_t, ttest_pval))
    pred_y = best_model.predict(X_orig, verbose=0)
    pred_yt = best_model.predict(Xt, verbose=0)
    ax[1].set_title('Neural Network Bond Prediction')
    ax[1].set_ylabel('Bond Daily Delta')
    ax[1].set_xlabel('Time (d)')
#plt.plot(range(len(y_orig)), pred_y, c='b', label='predict')
#plt.plot(range(len(y_orig)), y_orig, c='r', label='real')
# Test-set predictions are plotted after the training range on the x axis.
ax[1].plot(range(len(y_orig),len(yt)+len(y_orig)), pred_yt, c='cornflowerblue', label='predict_test')
ax[1].plot(range(len(y_orig),len(yt)+len(y_orig)), yt, c='darkorange', label='real_test')
ax[1].legend()
ax[1].set_title("Predictions Test Set")
plt.savefig("results/{}/performance.png".format(model_name))
| [
"pandas.read_csv",
"numpy.hstack",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve",
"tensorflow.keras.layers.Dense",
"copy.deepcopy",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.sort",
"numpy.max",
"numpy.vstack",
"numpy.min",
"numpy.argmin",
"tensorflow.keras.models.Sequential",
... | [((349, 374), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (372, 374), False, 'import argparse\n'), ((3056, 3108), 'pandas.merge', 'pd.merge', ([], {'left': 'pred_df', 'right': 'response_df', 'on': '"""Date"""'}), "(left=pred_df, right=response_df, on='Date')\n", (3064, 3108), True, 'import pandas as pd\n'), ((3572, 3588), 'copy.deepcopy', 'copy.deepcopy', (['X'], {}), '(X)\n', (3585, 3588), False, 'import copy\n'), ((3598, 3614), 'copy.deepcopy', 'copy.deepcopy', (['y'], {}), '(y)\n', (3611, 3614), False, 'import copy\n'), ((6333, 6368), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 7)'}), '(1, 2, figsize=(20, 7))\n', (6345, 6368), True, 'import matplotlib.pyplot as plt\n'), ((523, 540), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (532, 540), False, 'import json\n'), ((1809, 1855), 'pandas.concat', 'pd.concat', (['[d[1] for d in shifted_dfs]'], {'axis': '(1)'}), '([d[1] for d in shifted_dfs], axis=1)\n', (1818, 1855), True, 'import pandas as pd\n'), ((2486, 2512), 'pandas.read_csv', 'pd.read_csv', (['response_file'], {}), '(response_file)\n', (2497, 2512), True, 'import pandas as pd\n'), ((2765, 2823), 'pandas.merge', 'pd.merge', ([], {'right': 'response_df', 'left': 'lookback_pred', 'on': '"""Date"""'}), "(right=response_df, left=lookback_pred, on='Date')\n", (2773, 2823), True, 'import pandas as pd\n'), ((3663, 3711), 'numpy.random.normal', 'np.random.normal', (['(0)', 'augment_stdev', 'X_orig.shape'], {}), '(0, augment_stdev, X_orig.shape)\n', (3679, 3711), True, 'import numpy as np\n'), ((3720, 3757), 'numpy.vstack', 'np.vstack', (['(X, X_orig + random_noise)'], {}), '((X, X_orig + random_noise))\n', (3729, 3757), True, 'import numpy as np\n'), ((3766, 3788), 'numpy.hstack', 'np.hstack', (['(y, y_orig)'], {}), '((y, y_orig))\n', (3775, 3788), True, 'import numpy as np\n'), ((3977, 4005), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (4003, 
4005), True, 'import tensorflow as tf\n'), ((5723, 5762), 'numpy.max', 'np.max', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (5729, 5762), True, 'import numpy as np\n'), ((5847, 5882), 'numpy.min', 'np.min', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (5853, 5882), True, 'import numpy as np\n'), ((7255, 7282), 'sklearn.metrics.roc_curve', 'roc_curve', (['yt', 'y_pred_keras'], {}), '(yt, y_pred_keras)\n', (7264, 7282), False, 'from sklearn.metrics import roc_curve\n'), ((7299, 7324), 'sklearn.metrics.auc', 'auc', (['fpr_keras', 'tpr_keras'], {}), '(fpr_keras, tpr_keras)\n', (7302, 7324), False, 'from sklearn.metrics import auc\n'), ((8376, 8394), 'numpy.sort', 'np.sort', (['rand_vals'], {}), '(rand_vals)\n', (8383, 8394), True, 'import numpy as np\n'), ((8422, 8466), 'scipy.stats.ttest_1samp', 'sp.stats.ttest_1samp', (['sorted_vals', 'test_loss'], {}), '(sorted_vals, test_loss)\n', (8442, 8466), True, 'import scipy as sp\n'), ((1963, 1980), 'pandas.read_csv', 'pd.read_csv', (['f[0]'], {}), '(f[0])\n', (1974, 1980), True, 'import pandas as pd\n'), ((2087, 2123), 'pandas.merge', 'pd.merge', ([], {'left': 'x', 'right': 'y', 'on': '"""Date"""'}), "(left=x, right=y, on='Date')\n", (2095, 2123), True, 'import pandas as pd\n'), ((3540, 3554), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3552, 3554), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4020, 4085), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_pct'], {'input_shape': '(n_predictors,)'}), '(dropout_pct, input_shape=(n_predictors,))\n', (4043, 4085), True, 'import tensorflow as tf\n'), ((4700, 4816), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['filepath'], {'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_accuracy',\n verbose=1, save_best_only=True, 
mode='max')\n", (4734, 4816), True, 'import tensorflow as tf\n'), ((5057, 5169), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['filepath'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n", (5091, 5169), True, 'import tensorflow as tf\n'), ((5659, 5701), 'numpy.argmax', 'np.argmax', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (5668, 5701), True, 'import numpy as np\n'), ((5787, 5825), 'numpy.argmin', 'np.argmin', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (5796, 5825), True, 'import numpy as np\n'), ((8260, 8286), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_yt'], {}), '(rand_yt)\n', (8277, 8286), True, 'import numpy as np\n'), ((4171, 4228), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['nodes_per_layer'], {'activation': '"""relu"""'}), "(nodes_per_layer, activation='relu')\n", (4192, 4228), True, 'import tensorflow as tf\n'), ((4248, 4284), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_pct'], {}), '(dropout_pct)\n', (4271, 4284), True, 'import tensorflow as tf\n'), ((4360, 4406), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4381, 4406), True, 'import tensorflow as tf\n'), ((4842, 4887), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (4863, 4887), True, 'import tensorflow as tf\n'), ((8233, 8250), 'copy.deepcopy', 'copy.deepcopy', (['yt'], {}), '(yt)\n', (8246, 8250), False, 'import copy\n'), ((8704, 8722), 'numpy.mean', 'np.mean', (['rand_vals'], {}), '(rand_vals)\n', (8711, 8722), True, 'import numpy as np\n'), ((9037, 9055), 'numpy.mean', 'np.mean', (['rand_vals'], {}), '(rand_vals)\n', (9044, 9055), True, 
'import numpy as np\n'), ((2169, 2191), 're.match', 're.match', (['"""Unnamed"""', 'a'], {}), "('Unnamed', a)\n", (2177, 2191), False, 'import re\n')] |
import numpy as np
import torch
from bisect import bisect_left
class TinyImages(torch.utils.data.Dataset):
    """80 Million Tiny Images served as an unlabeled torch Dataset.

    Images are read lazily from the packed binary file (3072 bytes per
    32x32x3 uint8 image, column-major).  With ``exclude_cifar`` set,
    indices listed in the CIFAR-overlap index file are re-sampled.
    """

    def __init__(self, transform=None, exclude_cifar=True):
        # NOTE: the file handle is intentionally kept open for the dataset's
        # lifetime; workers seek into it on every __getitem__.
        data_file = open('datasets/unlabeled_datasets/80M_Tiny_Images/tiny_images.bin', "rb")

        def load_image(idx):
            data_file.seek(idx * 3072)
            data = data_file.read(3072)
            # np.fromstring is deprecated (removed for binary input in new
            # NumPy); np.frombuffer is the supported replacement.  .copy()
            # keeps the array writable, matching fromstring's behavior.
            return np.frombuffer(data, dtype='uint8').copy().reshape(32, 32, 3, order="F")

        self.load_image = load_image
        self.offset = 0     # offset index
        self.transform = transform
        self.exclude_cifar = exclude_cifar

        if exclude_cifar:
            self.cifar_idxs = []
            with open('datasets/unlabeled_datasets/80M_Tiny_Images/80mn_cifar_idxs.txt', 'r') as idxs:
                for idx in idxs:
                    # indices in file take the 80mn database to start at 1, hence "- 1"
                    self.cifar_idxs.append(int(idx) - 1)
            # hash table option: O(1) membership test per sampled index
            self.cifar_idxs = set(self.cifar_idxs)
            self.in_cifar = lambda x: x in self.cifar_idxs

    def __getitem__(self, index):
        index = (index + self.offset) % 79302016
        if self.exclude_cifar:
            # Re-sample until we land on a non-CIFAR index.
            while self.in_cifar(index):
                index = np.random.randint(79302017)
        img = self.load_image(index)
        if self.transform is not None:
            img = self.transform(img)
        return img, 0  # 0 is the class

    def __len__(self):
        return 79302017
| [
"numpy.random.randint",
"numpy.fromstring"
] | [((1690, 1717), 'numpy.random.randint', 'np.random.randint', (['(79302017)'], {}), '(79302017)\n', (1707, 1717), True, 'import numpy as np\n'), ((392, 426), 'numpy.fromstring', 'np.fromstring', (['data'], {'dtype': '"""uint8"""'}), "(data, dtype='uint8')\n", (405, 426), True, 'import numpy as np\n')] |
import matplotlib.pylab as plt
import numpy as np
#x = np.linspace(-np.pi, np.pi, 10)
#plt.plot(x, np.sin(x))
#plt.xlabel('Angle [rad]')
#plt.ylabel('sin(x)')
#plt.axis('tight')
#plt.show()
def sin_static():
    """Sample one period of a sine wave (amplitude 4) and discretize it.

    Returns an integer-valued array of 252 samples over [-pi, pi].
    """
    angles = np.linspace(-np.pi, np.pi, 252)
    wave = 4 * np.sin(angles)
    # Truncate toward zero to get the discretized y axis.
    return wave.astype(int)
# Plot the discretized sine wave against its sample angles.
y = sin_static()
x = np.linspace(-np.pi, np.pi, 252)  # must match the sampling inside sin_static()
plt.plot(x, y)
plt.xlabel('Angle [rad]')
plt.ylabel('sin(x)')
plt.axis('tight')
plt.show()
| [
"matplotlib.pylab.axis",
"matplotlib.pylab.xlabel",
"numpy.linspace",
"matplotlib.pylab.show",
"numpy.sin",
"matplotlib.pylab.plot",
"matplotlib.pylab.ylabel"
] | [((376, 407), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(252)'], {}), '(-np.pi, np.pi, 252)\n', (387, 407), True, 'import numpy as np\n'), ((408, 422), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (416, 422), True, 'import matplotlib.pylab as plt\n'), ((423, 448), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Angle [rad]"""'], {}), "('Angle [rad]')\n", (433, 448), True, 'import matplotlib.pylab as plt\n'), ((449, 469), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""sin(x)"""'], {}), "('sin(x)')\n", (459, 469), True, 'import matplotlib.pylab as plt\n'), ((470, 487), 'matplotlib.pylab.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (478, 487), True, 'import matplotlib.pylab as plt\n'), ((488, 498), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (496, 498), True, 'import matplotlib.pylab as plt\n'), ((228, 259), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(252)'], {}), '(-np.pi, np.pi, 252)\n', (239, 259), True, 'import numpy as np\n'), ((268, 277), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (274, 277), True, 'import numpy as np\n')] |
import math
from typing import Dict, Optional, Tuple
import numpy as np
import networkx as nx
def GetRecvWeights(topo: nx.DiGraph, rank: int) -> Tuple[float, Dict[int, float]]:
    """Return a Tuple of self_weight and neighbor_weights for receiving dictionary."""
    W = nx.to_numpy_array(topo)
    # Collect the weight on every incoming edge, keyed by source rank.
    incoming = {src: W[src, rank] for src in topo.predecessors(rank)}
    # The self-loop weight (0.0 when absent) is reported separately.
    self_weight = incoming.pop(rank, 0.0)
    return self_weight, incoming
def GetSendWeights(topo: nx.DiGraph, rank: int) -> Tuple[float, Dict[int, float]]:
    """Return a Tuple of self_weight and neighbor_weights for sending dictionary."""
    W = nx.to_numpy_array(topo)
    # Collect the weight on every outgoing edge, keyed by destination rank.
    outgoing = {dst: W[rank, dst] for dst in topo.successors(rank)}
    # The self-loop weight (0.0 when absent) is reported separately.
    self_weight = outgoing.pop(rank, 0.0)
    return self_weight, outgoing
def isPowerOf(x, base):
    """Return True iff ``x`` is ``base`` raised to a non-negative integer.

    ``isPowerOf(1, base)`` is True (exponent 0).

    The previous implementation computed ``base ** int(math.log(x, base))``;
    floating-point rounding in ``math.log`` misclassifies exact powers
    (e.g. ``isPowerOf(243, 3)`` returned False).  Repeated integer
    division is exact.
    """
    assert isinstance(base, int), "Base has to be a integer."
    assert base > 1, "Base has to a interger larger than 1."
    assert x > 0
    # Strip factors of `base`; a pure power reduces all the way to 1.
    while x % base == 0:
        x //= base
    return x == 1
def ExponentialTwoGraph(size: int) -> nx.DiGraph:
    """Generate graph topology such that each points only connected to a
    point such that the index difference is the power of 2."""
    assert size > 0
    # i & (i - 1) == 0 selects offset 0 and every exact power of two.
    mask = np.array([1.0 if i & (i - 1) == 0 else 0 for i in range(size)])
    row = mask / mask.sum()
    # Row i of the mixing matrix is the normalized mask rotated by i.
    topo = np.vstack([np.roll(row, i) for i in range(size)])
    return nx.from_numpy_array(topo, create_using=nx.DiGraph)
def ExponentialGraph(size: int, base: int = 2) -> nx.DiGraph:
    """Generate graph topology such that each points only connected to a
    point such that the index difference is power of base."""
    # Offset 0 always participates; other offsets only if they are an
    # exact power of `base`.
    weights = [1.0] + [1.0 if isPowerOf(i, base) else 0.0 for i in range(1, size)]
    row = np.array(weights)
    row /= row.sum()
    # Each node's row is the normalized mask rotated to its own index.
    topo = np.empty((size, size))
    for shift in range(size):
        topo[shift] = np.roll(row, shift)
    return nx.from_numpy_array(topo, create_using=nx.DiGraph)
def MeshGrid2DGraph(size: int, shape: Optional[Tuple[int, int]] = None) -> nx.DiGraph:
    """Generate 2D MeshGrid structure of graph.

    Assume shape = (nrow, ncol), when shape is provided, a meshgrid of nrow*ncol will be generated.
    when shape is not provided, nrow and ncol will be the two closest factors of size.

    For example: size = 24, nrow and ncol will be 4 and 6, respectively.
    We assume nrow will be equal to or smaller than ncol.
    If size is a prime number, nrow will be 1, and ncol will be size, which degrades the topology
    into a linear one.
    """
    assert size > 0
    if shape is None:
        # Find the largest factor of `size` that is <= sqrt(size);
        # this yields the two closest factors (nrow <= ncol).
        i = int(np.sqrt(size))
        while size % i != 0:
            i -= 1
        shape = (i, size // i)
    nrow, ncol = shape
    assert size == nrow * ncol, "The shape doesn't match the size provided."
    # First pass: adjacency (0/1) of the grid plus self-loops.  Nodes are
    # numbered row-major; (i+1) % ncol != 0 guards against wrapping a row.
    topo = np.zeros((size, size))
    for i in range(size):
        topo[i][i] = 1.0
        if (i + 1) % ncol != 0:
            topo[i][i + 1] = 1.0
            topo[i + 1][i] = 1.0
        if i + ncol < size:
            topo[i][i + ncol] = 1.0
            topo[i + ncol][i] = 1.0
    # According to Hasting rule (Policy 1) in https://arxiv.org/pdf/1702.05122.pdf
    # The neighbor definition in the paper is different from our implementation,
    # which includes the self node.
    topo_neighbor_with_self = [np.nonzero(topo[i])[0] for i in range(size)]
    # Second pass: Metropolis–Hastings weights; each off-diagonal weight is
    # 1/max(degree_i, degree_j), and the diagonal absorbs the remainder so
    # every row sums to 1.
    for i in range(size):
        for j in topo_neighbor_with_self[i]:
            if i != j:
                topo[i][j] = 1.0 / max(
                    len(topo_neighbor_with_self[i]), len(topo_neighbor_with_self[j])
                )
        topo[i][i] = 2.0 - topo[i].sum()
    G = nx.from_numpy_array(topo, create_using=nx.DiGraph)
    return G
def StarGraph(size: int, center_rank: int = 0) -> nx.DiGraph:
    """Generate star structure of graph.

    All other ranks are connected to the center_rank. The connection is
    bidirection, i.e. if the weight from node i to node j is non-zero, so
    is the weight from node j to node i.
    """
    assert size > 0
    w = 1 / size
    topo = np.zeros((size, size))
    # Every node keeps 1 - 1/size on its self-loop ...
    np.fill_diagonal(topo, 1 - w)
    # ... except the center, whose entire row and column (its own diagonal
    # entry included) carry weight 1/size.
    topo[center_rank, :] = w
    topo[:, center_rank] = w
    G = nx.from_numpy_array(topo, create_using=nx.DiGraph)
    return G
def RingGraph(size: int, connect_style: int = 0) -> nx.DiGraph:
    """Generate ring structure of graph (unilateral).

    Argument connect_style should be an integer between 0 and 2, where
    0 represents the bi-connection, 1 represents the left-connection,
    and 2 represents the right-connection.
    """
    assert size > 0
    # Fix: the old message said "1 for bi-connection"; 0 is bi-connection.
    assert 0 <= connect_style <= 2, (
        "connect_style has to be int between 0 and 2, where 0 "
        "for bi-connection, 1 for left connection, 2 for right connection."
    )
    # Degenerate sizes: every node mixes uniformly.
    if size == 1:
        return nx.from_numpy_array(np.array([[1.0]]), create_using=nx.DiGraph)
    if size == 2:
        return nx.from_numpy_array(
            np.array([[0.5, 0.5], [0.5, 0.5]]), create_using=nx.DiGraph
        )
    # Build the weight row for node 0, then rotate it for each node.
    x = np.zeros(size)
    x[0] = 0.5
    if connect_style == 0:  # bi-connection
        x[0] = 1 / 3.0
        x[-1] = 1 / 3.0
        x[1] = 1 / 3.0
    elif connect_style == 1:  # left-connection
        x[-1] = 0.5
    elif connect_style == 2:  # right-connection
        x[1] = 0.5
    else:
        raise ValueError("Connect_style has to be int between 0 and 2")
    topo = np.empty((size, size))
    for i in range(size):
        topo[i] = np.roll(x, i)
    G = nx.from_numpy_array(topo, create_using=nx.DiGraph)
    return G
| [
"numpy.roll",
"numpy.sqrt",
"networkx.to_numpy_array",
"math.log",
"numpy.array",
"numpy.zeros",
"networkx.from_numpy_array",
"numpy.empty",
"numpy.nonzero"
] | [((287, 310), 'networkx.to_numpy_array', 'nx.to_numpy_array', (['topo'], {}), '(topo)\n', (304, 310), True, 'import networkx as nx\n'), ((805, 828), 'networkx.to_numpy_array', 'nx.to_numpy_array', (['topo'], {}), '(topo)\n', (822, 828), True, 'import networkx as nx\n'), ((1693, 1715), 'numpy.empty', 'np.empty', (['(size, size)'], {}), '((size, size))\n', (1701, 1715), True, 'import numpy as np\n'), ((1782, 1832), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['topo'], {'create_using': 'nx.DiGraph'}), '(topo, create_using=nx.DiGraph)\n', (1801, 1832), True, 'import networkx as nx\n'), ((2195, 2206), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2203, 2206), True, 'import numpy as np\n'), ((2239, 2261), 'numpy.empty', 'np.empty', (['(size, size)'], {}), '((size, size))\n', (2247, 2261), True, 'import numpy as np\n'), ((2330, 2380), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['topo'], {'create_using': 'nx.DiGraph'}), '(topo, create_using=nx.DiGraph)\n', (2349, 2380), True, 'import networkx as nx\n'), ((3245, 3267), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (3253, 3267), True, 'import numpy as np\n'), ((4080, 4130), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['topo'], {'create_using': 'nx.DiGraph'}), '(topo, create_using=nx.DiGraph)\n', (4099, 4130), True, 'import networkx as nx\n'), ((4476, 4498), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (4484, 4498), True, 'import numpy as np\n'), ((4647, 4697), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['topo'], {'create_using': 'nx.DiGraph'}), '(topo, create_using=nx.DiGraph)\n', (4666, 4697), True, 'import networkx as nx\n'), ((5470, 5484), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (5478, 5484), True, 'import numpy as np\n'), ((5844, 5866), 'numpy.empty', 'np.empty', (['(size, size)'], {}), '((size, size))\n', (5852, 5866), True, 'import numpy as np\n'), ((5933, 5983), 'networkx.from_numpy_array', 
'nx.from_numpy_array', (['topo'], {'create_using': 'nx.DiGraph'}), '(topo, create_using=nx.DiGraph)\n', (5952, 5983), True, 'import networkx as nx\n'), ((1760, 1773), 'numpy.roll', 'np.roll', (['x', 'i'], {}), '(x, i)\n', (1767, 1773), True, 'import numpy as np\n'), ((2306, 2321), 'numpy.roll', 'np.roll', (['x_a', 'i'], {}), '(x_a, i)\n', (2313, 2321), True, 'import numpy as np\n'), ((5911, 5924), 'numpy.roll', 'np.roll', (['x', 'i'], {}), '(x, i)\n', (5918, 5924), True, 'import numpy as np\n'), ((3040, 3053), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (3047, 3053), True, 'import numpy as np\n'), ((3749, 3768), 'numpy.nonzero', 'np.nonzero', (['topo[i]'], {}), '(topo[i])\n', (3759, 3768), True, 'import numpy as np\n'), ((5281, 5298), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (5289, 5298), True, 'import numpy as np\n'), ((5391, 5425), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.5, 0.5]]'], {}), '([[0.5, 0.5], [0.5, 0.5]])\n', (5399, 5425), True, 'import numpy as np\n'), ((1322, 1339), 'math.log', 'math.log', (['x', 'base'], {}), '(x, base)\n', (1330, 1339), False, 'import math\n')] |
import ConfigSpace
import numpy as np
import threading
from robo.models.lcnet import LCNet, get_lc_net
from hpbandster.core.base_config_generator import base_config_generator
def smoothing(lc):
    """Return the running minimum of a learning curve (monotone smoothing)."""
    best_so_far = float("inf")
    smoothed = []
    for value in lc:
        best_so_far = min(best_so_far, value)
        smoothed.append(best_so_far)
    return smoothed
class LCNetWrapper(base_config_generator):
    """Hyperband config generator backed by an LC-Net learning-curve model.

    Sampling is random until enough observations have been collected, then
    switches to UCB-based sampling over the model's asymptotic predictions.
    """

    def __init__(self,
                 configspace,
                 max_budget,
                 n_points=2000,
                 delta=1.0,
                 n_candidates=1024,
                 **kwargs):
        """
        Parameters:
        -----------
        configspace: ConfigSpace.ConfigurationSpace
            the search space to sample configurations from
        max_budget: float
            largest budget; used to normalize budgets into (0, 1]
        n_points: int
            number of observed curve points to collect before training LC-Net
        delta: float
            exploration weight on the predictive std in the UCB score
        n_candidates: int
            number of random candidates scored per model-based sample
        """
        super(LCNetWrapper, self).__init__(**kwargs)
        self.n_candidates = n_candidates
        # Bayesian neural net over learning curves (SGHMC sampling).
        self.model = LCNet(sampling_method="sghmc",
                           l_rate=np.sqrt(1e-4),
                           mdecay=.05,
                           n_nets=100,
                           burn_in=500,
                           n_iters=3000,
                           get_net=get_lc_net,
                           precondition=True)
        self.config_space = configspace
        self.max_budget = max_budget
        self.train = None           # (n, d+1) design matrix: config + normalized budget
        self.train_targets = None   # flipped (increasing) curve values
        self.n_points = n_points
        self.is_trained = False
        self.counter = 0            # observed curve points since last (re)train
        self.delta = delta
        # Guards model training/prediction across worker threads.
        self.lock = threading.Lock()

    def get_config(self, budget):
        """
        function to sample a new configuration
        This function is called inside Hyperband to query a new configuration
        Parameters:
        -----------
        budget: float
            the budget for which this configuration is scheduled
        returns: config
            should return a valid configuration
        """
        self.lock.acquire()
        if not self.is_trained:
            # Cold start: fall back to uniform random sampling.
            c = self.config_space.sample_configuration().get_array()
        else:
            candidates = np.array([
                self.config_space.sample_configuration().get_array()
                for _ in range(self.n_candidates)
            ])
            # We are only interested on the asymptotic value
            # (append t=1, i.e. the full budget, to every candidate).
            projected_candidates = np.concatenate((candidates, np.ones([self.n_candidates, 1])),
                                                  axis=1)
            # Compute the upper confidence bound of the function at the asymptote
            m, v = self.model.predict(projected_candidates)
            ucb_values = m + self.delta * np.sqrt(v)
            print(ucb_values)
            # Sample a configuration based on the ucb values
            p = np.ones(self.n_candidates) * (ucb_values / np.sum(ucb_values))
            idx = np.random.choice(self.n_candidates, 1, False, p)
            c = candidates[idx][0]
        config = ConfigSpace.Configuration(self.config_space, vector=c)
        self.lock.release()
        return config.get_dictionary(), {}

    def new_result(self, job):
        """
        function to register finished runs
        Every time a run has finished, this function should be called
        to register it with the result logger. If overwritten, make
        sure to call this method from the base class to ensure proper
        logging.
        Parameters:
        -----------
        job_id: dict
            a dictionary containing all the info about the run
        job_result: dict
            contains all the results of the job, i.e. it's a dict with
            the keys 'loss' and 'info'
        """
        super().new_result(job)
        conf = ConfigSpace.Configuration(self.config_space, job.kwargs['config']).get_array()
        epochs = len(job.result["info"]["learning_curve"])
        budget = int(job.kwargs["budget"])
        # Normalized time axis: one point per observed epoch, in (0, 1].
        t_idx = np.linspace(budget / epochs, budget, epochs) / self.max_budget
        x_new = np.repeat(conf[None, :], t_idx.shape[0], axis=0)
        x_new = np.concatenate((x_new, t_idx[:, None]), axis=1)
        # Smooth learning curve
        lc = smoothing(job.result["info"]["learning_curve"])
        # Flip learning curves since LC-Net wants increasing curves
        lc_new = [1 - y for y in lc]
        if self.train is None:
            self.train = x_new
            self.train_targets = lc_new
        else:
            self.train = np.append(self.train, x_new, axis=0)
            self.train_targets = np.append(self.train_targets, lc_new, axis=0)
        # Retrain once enough new curve points have accumulated.
        if self.counter >= self.n_points:
            self.lock.acquire()
            # Min-max normalize targets before fitting LC-Net.
            y_min = np.min(self.train_targets)
            y_max = np.max(self.train_targets)
            train_targets = (self.train_targets - y_min) / (y_max - y_min)
            self.model.train(self.train, train_targets)
            self.is_trained = True
            self.counter = 0
            self.lock.release()
        else:
            self.counter += epochs
| [
"numpy.repeat",
"numpy.sqrt",
"numpy.ones",
"numpy.random.choice",
"threading.Lock",
"numpy.max",
"numpy.append",
"numpy.sum",
"numpy.linspace",
"numpy.concatenate",
"numpy.min",
"ConfigSpace.Configuration"
] | [((1678, 1694), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1692, 1694), False, 'import threading\n'), ((3133, 3187), 'ConfigSpace.Configuration', 'ConfigSpace.Configuration', (['self.config_space'], {'vector': 'c'}), '(self.config_space, vector=c)\n', (3158, 3187), False, 'import ConfigSpace\n'), ((4221, 4269), 'numpy.repeat', 'np.repeat', (['conf[None, :]', 't_idx.shape[0]'], {'axis': '(0)'}), '(conf[None, :], t_idx.shape[0], axis=0)\n', (4230, 4269), True, 'import numpy as np\n'), ((4287, 4334), 'numpy.concatenate', 'np.concatenate', (['(x_new, t_idx[:, None])'], {'axis': '(1)'}), '((x_new, t_idx[:, None]), axis=1)\n', (4301, 4334), True, 'import numpy as np\n'), ((3030, 3078), 'numpy.random.choice', 'np.random.choice', (['self.n_candidates', '(1)', '(False)', 'p'], {}), '(self.n_candidates, 1, False, p)\n', (3046, 3078), True, 'import numpy as np\n'), ((4142, 4186), 'numpy.linspace', 'np.linspace', (['(budget / epochs)', 'budget', 'epochs'], {}), '(budget / epochs, budget, epochs)\n', (4153, 4186), True, 'import numpy as np\n'), ((4677, 4713), 'numpy.append', 'np.append', (['self.train', 'x_new'], {'axis': '(0)'}), '(self.train, x_new, axis=0)\n', (4686, 4713), True, 'import numpy as np\n'), ((4747, 4792), 'numpy.append', 'np.append', (['self.train_targets', 'lc_new'], {'axis': '(0)'}), '(self.train_targets, lc_new, axis=0)\n', (4756, 4792), True, 'import numpy as np\n'), ((4889, 4915), 'numpy.min', 'np.min', (['self.train_targets'], {}), '(self.train_targets)\n', (4895, 4915), True, 'import numpy as np\n'), ((4936, 4962), 'numpy.max', 'np.max', (['self.train_targets'], {}), '(self.train_targets)\n', (4942, 4962), True, 'import numpy as np\n'), ((1136, 1151), 'numpy.sqrt', 'np.sqrt', (['(0.0001)'], {}), '(0.0001)\n', (1143, 1151), True, 'import numpy as np\n'), ((2949, 2975), 'numpy.ones', 'np.ones', (['self.n_candidates'], {}), '(self.n_candidates)\n', (2956, 2975), True, 'import numpy as np\n'), ((3943, 4009), 'ConfigSpace.Configuration', 
'ConfigSpace.Configuration', (['self.config_space', "job.kwargs['config']"], {}), "(self.config_space, job.kwargs['config'])\n", (3968, 4009), False, 'import ConfigSpace\n'), ((2553, 2584), 'numpy.ones', 'np.ones', (['[self.n_candidates, 1]'], {}), '([self.n_candidates, 1])\n', (2560, 2584), True, 'import numpy as np\n'), ((2831, 2841), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (2838, 2841), True, 'import numpy as np\n'), ((2992, 3010), 'numpy.sum', 'np.sum', (['ucb_values'], {}), '(ucb_values)\n', (2998, 3010), True, 'import numpy as np\n')] |
from typing import Dict, List, Tuple
from itertools import product
import re
import yaml
from pathlib import Path
import numpy as np
import os
import shutil
from abc import ABCMeta, abstractmethod
import nncase
import struct
from compare_util import compare_with_ground_truth, VerboseType
class Edict:
    """Recursive attribute-access wrapper around a nested dict.

    Nested dicts become nested Edicts (also inside lists/tuples); keys
    containing 'kwargs' are kept as plain dicts (empty dict when falsy).
    """

    def __init__(self, d: Dict[str, int]) -> None:
        for name, value in d.items():
            if isinstance(value, (list, tuple)):
                # Wrap dict elements of sequences; leave other elements as-is.
                setattr(self, name,
                        [Edict(x) if isinstance(x, dict) else x for x in value])
            else:
                if 'kwargs' in name:
                    # kwargs-style entries stay plain dicts so they can be
                    # splatted with ** later; falsy values become {}.
                    setattr(self, name, value if value else dict())
                else:
                    if isinstance(value, dict):
                        setattr(self, name, Edict(value))
                    else:
                        setattr(self, name, value)

    def keys(self):
        # Dict-like view of the wrapped attribute names.
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __repr__(self, indent=0) -> str:
        """Pretty-print as 'key : value' lines, indenting nested Edicts."""
        s: str = ''
        for k, v in self.__dict__.items():
            s += indent * ' ' + k + ' : '
            if isinstance(v, Edict):
                # Child indent = current column (chars since last newline),
                # so nested values line up under their key.
                s += '\n' + v.__repr__(len(s) - s.rfind('\n'))
            else:
                s += v.__repr__().replace('\n', ' ')
            s += '\n'
        return s.rstrip('\n')
def generate_random(shape: List[int], dtype: np.dtype) -> np.ndarray:
    """Create a random tensor of the given shape and dtype.

    uint8 -> integers in [0, 256); int8 -> integers in [-128, 128);
    any other dtype -> uniform floats in [-1, 1).

    The original compared ``dtype is np.uint8``, which only matched the
    scalar type object and silently fell through to the float branch for
    dtype instances such as ``np.dtype('uint8')``.  Normalizing through
    ``np.dtype`` accepts both spellings (backward compatible).
    """
    dt = np.dtype(dtype)
    if dt == np.uint8:
        data = np.random.randint(0, 256, shape)
    elif dt == np.int8:
        data = np.random.randint(-128, 128, shape)
    else:
        data = np.random.rand(*shape) * 2 - 1
    data = data.astype(dtype=dt)
    return data
def save_array_as_txt(save_path, value_np, bit_16_represent=False):
    """Dump an array to disk.

    Text mode writes a "shape: (...)" header followed by one '%f'-formatted
    value per line; bit_16_represent instead saves a .npy file with values
    truncated to bfloat16 precision.
    """
    if bit_16_represent:
        np.save(save_path, _cast_bfloat16_then_float32(value_np))
    else:
        dims = ",".join(str(dim) for dim in value_np.shape)
        with open(save_path, 'w') as out:
            out.write("shape: (" + dims + ")\n")
            for item in value_np.reshape([-1]):
                out.write("%f\n" % item)
    print("----> %s" % save_path)
def _cast_bfloat16_then_float32(values: np.array):
shape = values.shape
values = values.reshape([-1])
for i, value in enumerate(values):
value = float(value)
packed = struct.pack('!f', value)
integers = [c for c in packed][:2] + [0, 0]
value = struct.unpack('!f', bytes(integers))[0]
values[i] = value
values = values.reshape(shape)
return values
# Registry mapping the generator name referenced in config.yml to the
# callable that produces test data (used by TestRunner.generate_data).
Fuc = {
    'generate_random': generate_random
}
class TestRunner(metaclass=ABCMeta):
    """Abstract driver that runs one compiled-model test case end to end.

    The flow (see ``run_single``) is: generate input/calibration data, run
    the reference CPU inference, compile the model with nncase, then compare
    both the nncase evaluator and the nncase simulator outputs against the
    CPU reference.  Subclasses provide the model-format-specific steps
    (``parse_model_input_output``, ``cpu_infer``, ``import_model``).
    """

    def __init__(self, case_name, targets=None) -> None:
        # Load the shared test configuration sitting next to this file.
        config_root = os.path.dirname(__file__)
        with open(os.path.join(config_root, 'config.yml'), encoding='utf8') as f:
            cfg = yaml.safe_load(f)
        config = Edict(cfg)
        self.cfg = self.validte_config(config)
        # '[' / ']' appear in parametrized pytest case ids; make the name
        # safe to use as a directory name.
        case_name = case_name.replace('[', '_').replace(']', '_')
        self.case_dir = os.path.join(self.cfg.setup.root, case_name)
        self.clear(self.case_dir)
        if targets is None:
            # Keep only the targets supported by this nncase build.
            self.cfg.case.eval[0].values = self.validate_targets(
                self.cfg.case.eval[0].values)
            self.cfg.case.infer[0].values = self.validate_targets(
                self.cfg.case.infer[0].values)
        else:
            targets = self.validate_targets(targets)
            self.cfg.case.eval[0].values = targets
            self.cfg.case.infer[0].values = targets
        self.inputs: List[Dict] = []
        self.calibs: List[Dict] = []
        self.outputs: List[Dict] = []
        self.input_paths: List[Tuple[str, str]] = []
        self.calib_paths: List[Tuple[str, str]] = []
        self.output_paths: List[Tuple[str, str]] = []
        # Used in compare_results to pull the output index out of a
        # result file name.  NOTE(review): "(\d+)" should be a raw string.
        self.num_pattern = re.compile("(\d+)")

    def validte_config(self, config):
        """Hook for subclasses to adjust/validate the loaded config.

        NOTE(review): name looks like a typo for ``validate_config``, but
        renaming would break subclasses that override it.
        """
        return config

    def validate_targets(self, targets):
        """Return the subset of *targets* available in this nncase build."""
        new_targets = []
        for t in targets:
            if nncase.test_target(t):
                new_targets.append(t)
            else:
                print("WARN: target[{0}] not found".format(t))
        return new_targets

    def run(self, model_path: str):
        """Run the configured case for the model at *model_path*."""
        # NOTE: could fan the cases out to a thread pool here.
        # case_name = self.process_model_path_name(model_path)
        # case_dir = os.path.join(self.cfg.setup.root, case_name)
        # if not os.path.exists(case_dir):
        #     os.makedirs(case_dir)
        case_dir = os.path.dirname(model_path)
        self.run_single(self.cfg.case, case_dir, model_path)

    def process_model_path_name(self, model_path: str) -> str:
        """Flatten a model file path into a single case name."""
        if Path(model_path).is_file():
            case_name = Path(model_path)
            return '_'.join(str(case_name.parent).split('/') + [case_name.stem])
        return model_path

    def clear(self, case_dir):
        """Remove stale artifacts and create a fresh case directory.

        In CI the whole test root is wiped instead of just this case's dir.
        """
        in_ci = os.getenv('CI', False)
        if in_ci:
            if os.path.exists(self.cfg.setup.root):
                shutil.rmtree(self.cfg.setup.root)
        else:
            if os.path.exists(case_dir):
                shutil.rmtree(case_dir)
        os.makedirs(case_dir)

    @abstractmethod
    def parse_model_input_output(self, model_path: str):
        """Populate ``self.inputs``/``self.outputs`` from the model file."""
        pass

    @abstractmethod
    def cpu_infer(self, case_dir: str, model_content: bytes):
        """Run the reference (framework) inference and save its outputs."""
        pass

    @abstractmethod
    def import_model(self, compiler, model_content, import_options):
        """Feed *model_content* into the nncase *compiler*."""
        pass

    def run_single(self, cfg, case_dir: str, model_file: str):
        """Execute the full generate/compile/compare pipeline for one model."""
        if not self.inputs:
            self.parse_model_input_output(model_file)
        self.generate_data(cfg.generate_inputs, case_dir,
                           self.inputs, self.input_paths, 'input')
        self.generate_data(cfg.generate_calibs, case_dir,
                           self.calibs, self.calib_paths, 'calib')
        self.cpu_infer(case_dir, model_file)
        import_options, compile_options = self.get_compiler_options(cfg, model_file)
        model_content = self.read_model_file(model_file)
        self.run_evaluator(cfg, case_dir, import_options, compile_options, model_content)
        self.run_inference(cfg, case_dir, import_options, compile_options, model_content)

    def get_compiler_options(self, cfg, model_file):
        """Build nncase import/compile options from config and model type."""
        import_options = nncase.ImportOptions(**cfg.importer_opt.kwargs)
        # Layout follows the source framework convention.
        if os.path.splitext(model_file)[-1] == ".tflite":
            import_options.input_layout = "NHWC"
            import_options.output_layout = "NHWC"
        elif os.path.splitext(model_file)[-1] == ".onnx":
            import_options.input_layout = "NCHW"
            import_options.output_layout = "NCHW"
        compile_options = nncase.CompileOptions()
        # NOTE(review): config values are applied via exec(); string values
        # are re-quoted so they survive the round trip through source text.
        for k, v in cfg.compile_opt.kwargs.items():
            e = '"'
            exec(
                f'compile_options.{k} = {e + v + e if isinstance(v, str) else v}')
        return import_options, compile_options

    def run_evaluator(self, cfg, case_dir, import_options, compile_options, model_content):
        """Run the evaluator for every combination of the eval matrix."""
        names, args = TestRunner.split_value(cfg.eval)
        for combine_args in product(*args):
            dict_args = dict(zip(names, combine_args))
            eval_output_paths = self.generate_evaluates(
                cfg, case_dir, import_options,
                compile_options, model_content, dict_args)
            assert self.compare_results(
                self.output_paths, eval_output_paths, dict_args)

    def run_inference(self, cfg, case_dir, import_options, compile_options, model_content):
        """Run the simulator for every combination of the infer matrix."""
        names, args = TestRunner.split_value(cfg.infer)
        for combine_args in product(*args):
            dict_args = dict(zip(names, combine_args))
            # PTQ with multiple model inputs is not supported here.
            if dict_args['ptq'] and len(self.inputs) > 1:
                continue
            infer_output_paths = self.nncase_infer(
                cfg, case_dir, import_options,
                compile_options, model_content, dict_args)
            assert self.compare_results(
                self.output_paths, infer_output_paths, dict_args)

    @staticmethod
    def split_value(kwcfg: List[Dict[str, str]]) -> Tuple[List[str], List[str]]:
        """Split a list of {name, values} entries into parallel lists."""
        arg_names = []
        arg_values = []
        for d in kwcfg:
            arg_names.append(d.name)
            arg_values.append(d.values)
        return (arg_names, arg_values)

    def read_model_file(self, model_file: str) -> bytes:
        """Read the raw model bytes from disk."""
        with open(model_file, 'rb') as f:
            model_content = f.read()
        return model_content

    @staticmethod
    def kwargs_to_path(path: str, kwargs: Dict[str, str]):
        """Extend *path* with one directory level per kwarg value."""
        for k, v in kwargs.items():
            if isinstance(v, str):
                path = os.path.join(path, v)
            elif isinstance(v, bool):
                # booleans encode as e.g. 'ptq' / 'noptq'
                path = os.path.join(path, ('' if v else 'no') + k)
        return path

    def generate_evaluates(self, cfg, case_dir: str,
                           import_options: nncase.ImportOptions,
                           compile_options: nncase.CompileOptions,
                           model_content: bytes, kwargs: Dict[str, str]
                           ) -> List[Tuple[str, str]]:
        """Run the nncase evaluator and dump its outputs.

        Returns a list of (bin_path, txt_path) pairs, one per output.
        """
        eval_dir = TestRunner.kwargs_to_path(
            os.path.join(case_dir, 'eval'), kwargs)
        compile_options.target = kwargs['target']
        compile_options.dump_dir = eval_dir
        compiler = nncase.Compiler(compile_options)
        self.import_model(compiler, model_content, import_options)
        evaluator = compiler.create_evaluator(3)
        eval_output_paths = []
        for i in range(len(self.inputs)):
            input_tensor = nncase.RuntimeTensor.from_numpy(
                self.inputs[i]['data'])
            input_tensor.copy_to(evaluator.get_input_tensor(i))
        evaluator.run()
        for i in range(evaluator.outputs_size):
            result = evaluator.get_output_tensor(i).to_numpy()
            # Each output is stored both as raw binary and as readable text.
            eval_output_paths.append((
                os.path.join(eval_dir, f'nncase_result_{i}.bin'),
                os.path.join(eval_dir, f'nncase_result_{i}.txt')))
            result.tofile(eval_output_paths[-1][0])
            save_array_as_txt(eval_output_paths[-1][1], result)
        return eval_output_paths

    def nncase_infer(self, cfg, case_dir: str,
                     import_options: nncase.ImportOptions,
                     compile_options: nncase.CompileOptions,
                     model_content: bytes, kwargs: Dict[str, str]
                     ) -> List[Tuple[str, str]]:
        """Compile to a kmodel, run it in the simulator, dump its outputs.

        Returns a list of (bin_path, txt_path) pairs, one per output.
        """
        infer_dir = TestRunner.kwargs_to_path(
            os.path.join(case_dir, 'infer'), kwargs)
        compile_options.target = kwargs['target']
        compile_options.dump_dir = infer_dir
        compiler = nncase.Compiler(compile_options)
        self.import_model(compiler, model_content, import_options)
        if kwargs['ptq']:
            # Post-training quantization: feed the calibration samples in.
            ptq_options = nncase.PTQTensorOptions()
            ptq_options.set_tensor_data(np.asarray([sample['data'] for sample in self.calibs]).tobytes())
            ptq_options.samples_count = cfg.generate_calibs.batch_size
            ptq_options.input_mean = cfg.ptq_opt.kwargs['input_mean']
            ptq_options.input_std = cfg.ptq_opt.kwargs['input_std']
            compiler.use_ptq(ptq_options)
        compiler.compile()
        kmodel = compiler.gencode_tobytes()
        with open(os.path.join(infer_dir, 'test.kmodel'), 'wb') as f:
            f.write(kmodel)
        sim = nncase.Simulator()
        sim.load_model(kmodel)
        infer_output_paths: List[np.ndarray] = []
        for i in range(len(self.inputs)):
            sim.set_input_tensor(
                i, nncase.RuntimeTensor.from_numpy(self.inputs[i]['data']))
        sim.run()
        for i in range(sim.outputs_size):
            result = sim.get_output_tensor(i).to_numpy()
            infer_output_paths.append((
                os.path.join(infer_dir, f'nncase_result_{i}.bin'),
                os.path.join(infer_dir, f'nncase_result_{i}.txt')))
            result.tofile(infer_output_paths[-1][0])
            save_array_as_txt(infer_output_paths[-1][1], result)
        return infer_output_paths

    def on_test_start(self) -> None:
        """Hook called before a test starts; default no-op."""
        pass

    def generate_data(self, cfg, case_dir: str, inputs: List[Dict], path_list: List[str], name: str):
        """Generate ``cfg.numbers`` batches of random data for *inputs*.

        Each tensor is written to '<name>_<n>_<i>.bin'/'.txt' and the last
        generated array is stored back into ``input['data']``.
        """
        for n in range(cfg.numbers):
            i = 0
            for input in inputs:
                shape = input['shape']
                # NOTE(review): this mutates input['shape'] in place, so
                # with cfg.numbers > 1 the batch dim is multiplied again on
                # every outer iteration — looks like a bug; confirm.
                shape[0] *= cfg.batch_size
                data = Fuc[cfg.name](shape, input['dtype'])
                path_list.append(
                    (os.path.join(case_dir, f'{name}_{n}_{i}.bin'),
                     os.path.join(case_dir, f'{name}_{n}_{i}.txt')))
                data.tofile(path_list[-1][0])
                save_array_as_txt(path_list[-1][1], data)
                i += 1
                input['data'] = data

    def process_input(self, inputs: List[np.array], **kwargs) -> None:
        """Hook for input preprocessing; default no-op."""
        pass

    def process_output(self, outputs: List[np.array], **kwargs) -> None:
        """Hook for output postprocessing; default no-op."""
        pass

    def on_test_end(self) -> None:
        """Hook called after a test finishes; default no-op."""
        pass

    def compare_results(self,
                        ref_ouputs: List[Tuple[str]],
                        test_outputs: List[Tuple[str]],
                        kwargs: Dict[str, str]):
        """Compare each test output txt against its reference txt.

        Appends a Pass/Fail line to test_result.txt per output and returns
        False on the first mismatch, True if all outputs match.
        """
        for ref_file, test_file in zip(ref_ouputs, test_outputs):
            judge = compare_with_ground_truth(test_file[1],
                                              ref_file[1],
                                              state=0,
                                              verbose=VerboseType.PRINT_RESULT)
            # Recover the kwarg values and output index from the file path.
            name_list = test_file[1].split('/')
            kw_names = ' '.join(name_list[-len(kwargs) - 2:-1])
            i = self.num_pattern.findall(name_list[-1])
            if judge.is_good():
                result = "\nPass [ {0} ] Output: {1}!!\n".format(kw_names, i)
                print(result)
                with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f:
                    f.write(result)
            else:
                result = "\nFail [ {0} ] Output: {1}!!\n".format(kw_names, i)
                print(result)
                with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f:
                    f.write(result)
                return False
        return True
| [
"numpy.random.rand",
"nncase.ImportOptions",
"re.compile",
"nncase.CompileOptions",
"nncase.test_target",
"os.path.exists",
"pathlib.Path",
"itertools.product",
"numpy.asarray",
"compare_util.compare_with_ground_truth",
"os.path.splitext",
"struct.pack",
"os.path.dirname",
"nncase.RuntimeT... | [((1547, 1579), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', 'shape'], {}), '(0, 256, shape)\n', (1564, 1579), True, 'import numpy as np\n'), ((2464, 2488), 'struct.pack', 'struct.pack', (['"""!f"""', 'value'], {}), "('!f', value)\n", (2475, 2488), False, 'import struct\n'), ((2846, 2871), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2861, 2871), False, 'import os\n'), ((3161, 3205), 'os.path.join', 'os.path.join', (['self.cfg.setup.root', 'case_name'], {}), '(self.cfg.setup.root, case_name)\n', (3173, 3205), False, 'import os\n'), ((3965, 3985), 're.compile', 're.compile', (['"""(\\\\d+)"""'], {}), "('(\\\\d+)')\n", (3975, 3985), False, 'import re\n'), ((4607, 4634), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (4622, 4634), False, 'import os\n'), ((4995, 5017), 'os.getenv', 'os.getenv', (['"""CI"""', '(False)'], {}), "('CI', False)\n", (5004, 5017), False, 'import os\n'), ((5242, 5263), 'os.makedirs', 'os.makedirs', (['case_dir'], {}), '(case_dir)\n', (5253, 5263), False, 'import os\n'), ((6396, 6443), 'nncase.ImportOptions', 'nncase.ImportOptions', ([], {}), '(**cfg.importer_opt.kwargs)\n', (6416, 6443), False, 'import nncase\n'), ((6785, 6808), 'nncase.CompileOptions', 'nncase.CompileOptions', ([], {}), '()\n', (6806, 6808), False, 'import nncase\n'), ((7205, 7219), 'itertools.product', 'product', (['*args'], {}), '(*args)\n', (7212, 7219), False, 'from itertools import product\n'), ((7722, 7736), 'itertools.product', 'product', (['*args'], {}), '(*args)\n', (7729, 7736), False, 'from itertools import product\n'), ((9438, 9470), 'nncase.Compiler', 'nncase.Compiler', (['compile_options'], {}), '(compile_options)\n', (9453, 9470), False, 'import nncase\n'), ((10782, 10814), 'nncase.Compiler', 'nncase.Compiler', (['compile_options'], {}), '(compile_options)\n', (10797, 10814), False, 'import nncase\n'), ((11501, 11519), 'nncase.Simulator', 'nncase.Simulator', 
([], {}), '()\n', (11517, 11519), False, 'import nncase\n'), ((1622, 1657), 'numpy.random.randint', 'np.random.randint', (['(-128)', '(128)', 'shape'], {}), '(-128, 128, shape)\n', (1639, 1657), True, 'import numpy as np\n'), ((2972, 2989), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2986, 2989), False, 'import yaml\n'), ((4154, 4175), 'nncase.test_target', 'nncase.test_target', (['t'], {}), '(t)\n', (4172, 4175), False, 'import nncase\n'), ((4823, 4839), 'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (4827, 4839), False, 'from pathlib import Path\n'), ((5051, 5086), 'os.path.exists', 'os.path.exists', (['self.cfg.setup.root'], {}), '(self.cfg.setup.root)\n', (5065, 5086), False, 'import os\n'), ((5168, 5192), 'os.path.exists', 'os.path.exists', (['case_dir'], {}), '(case_dir)\n', (5182, 5192), False, 'import os\n'), ((9285, 9315), 'os.path.join', 'os.path.join', (['case_dir', '"""eval"""'], {}), "(case_dir, 'eval')\n", (9297, 9315), False, 'import os\n'), ((9687, 9742), 'nncase.RuntimeTensor.from_numpy', 'nncase.RuntimeTensor.from_numpy', (["self.inputs[i]['data']"], {}), "(self.inputs[i]['data'])\n", (9718, 9742), False, 'import nncase\n'), ((10627, 10658), 'os.path.join', 'os.path.join', (['case_dir', '"""infer"""'], {}), "(case_dir, 'infer')\n", (10639, 10658), False, 'import os\n'), ((10934, 10959), 'nncase.PTQTensorOptions', 'nncase.PTQTensorOptions', ([], {}), '()\n', (10957, 10959), False, 'import nncase\n'), ((13416, 13516), 'compare_util.compare_with_ground_truth', 'compare_with_ground_truth', (['test_file[1]', 'ref_file[1]'], {'state': '(0)', 'verbose': 'VerboseType.PRINT_RESULT'}), '(test_file[1], ref_file[1], state=0, verbose=\n VerboseType.PRINT_RESULT)\n', (13441, 13516), False, 'from compare_util import compare_with_ground_truth, VerboseType\n'), ((2890, 2929), 'os.path.join', 'os.path.join', (['config_root', '"""config.yml"""'], {}), "(config_root, 'config.yml')\n", (2902, 2929), False, 'import os\n'), ((4771, 4787), 
'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (4775, 4787), False, 'from pathlib import Path\n'), ((5104, 5138), 'shutil.rmtree', 'shutil.rmtree', (['self.cfg.setup.root'], {}), '(self.cfg.setup.root)\n', (5117, 5138), False, 'import shutil\n'), ((5210, 5233), 'shutil.rmtree', 'shutil.rmtree', (['case_dir'], {}), '(case_dir)\n', (5223, 5233), False, 'import shutil\n'), ((6455, 6483), 'os.path.splitext', 'os.path.splitext', (['model_file'], {}), '(model_file)\n', (6471, 6483), False, 'import os\n'), ((8767, 8788), 'os.path.join', 'os.path.join', (['path', 'v'], {}), '(path, v)\n', (8779, 8788), False, 'import os\n'), ((11407, 11445), 'os.path.join', 'os.path.join', (['infer_dir', '"""test.kmodel"""'], {}), "(infer_dir, 'test.kmodel')\n", (11419, 11445), False, 'import os\n'), ((11696, 11751), 'nncase.RuntimeTensor.from_numpy', 'nncase.RuntimeTensor.from_numpy', (["self.inputs[i]['data']"], {}), "(self.inputs[i]['data'])\n", (11727, 11751), False, 'import nncase\n'), ((1683, 1705), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1697, 1705), True, 'import numpy as np\n'), ((6614, 6642), 'os.path.splitext', 'os.path.splitext', (['model_file'], {}), '(model_file)\n', (6630, 6642), False, 'import os\n'), ((8850, 8893), 'os.path.join', 'os.path.join', (['path', "(('' if v else 'no') + k)"], {}), "(path, ('' if v else 'no') + k)\n", (8862, 8893), False, 'import os\n'), ((10019, 10067), 'os.path.join', 'os.path.join', (['eval_dir', 'f"""nncase_result_{i}.bin"""'], {}), "(eval_dir, f'nncase_result_{i}.bin')\n", (10031, 10067), False, 'import os\n'), ((10085, 10133), 'os.path.join', 'os.path.join', (['eval_dir', 'f"""nncase_result_{i}.txt"""'], {}), "(eval_dir, f'nncase_result_{i}.txt')\n", (10097, 10133), False, 'import os\n'), ((11928, 11977), 'os.path.join', 'os.path.join', (['infer_dir', 'f"""nncase_result_{i}.bin"""'], {}), "(infer_dir, f'nncase_result_{i}.bin')\n", (11940, 11977), False, 'import os\n'), ((11995, 12044), 
'os.path.join', 'os.path.join', (['infer_dir', 'f"""nncase_result_{i}.txt"""'], {}), "(infer_dir, f'nncase_result_{i}.txt')\n", (12007, 12044), False, 'import os\n'), ((11000, 11054), 'numpy.asarray', 'np.asarray', (["[sample['data'] for sample in self.calibs]"], {}), "([sample['data'] for sample in self.calibs])\n", (11010, 11054), True, 'import numpy as np\n'), ((12639, 12684), 'os.path.join', 'os.path.join', (['case_dir', 'f"""{name}_{n}_{i}.bin"""'], {}), "(case_dir, f'{name}_{n}_{i}.bin')\n", (12651, 12684), False, 'import os\n'), ((12707, 12752), 'os.path.join', 'os.path.join', (['case_dir', 'f"""{name}_{n}_{i}.txt"""'], {}), "(case_dir, f'{name}_{n}_{i}.txt')\n", (12719, 12752), False, 'import os\n'), ((13984, 14030), 'os.path.join', 'os.path.join', (['self.case_dir', '"""test_result.txt"""'], {}), "(self.case_dir, 'test_result.txt')\n", (13996, 14030), False, 'import os\n'), ((14232, 14278), 'os.path.join', 'os.path.join', (['self.case_dir', '"""test_result.txt"""'], {}), "(self.case_dir, 'test_result.txt')\n", (14244, 14278), False, 'import os\n')] |
from os.path import getmtime
from contextlib import contextmanager
import re
import os
from pathlib import Path
import pytest
import numpy as np
import qcodes.tests.dataset
from qcodes.dataset.sqlite_base import get_experiments
from qcodes.dataset.experiment_container import Experiment
from qcodes.dataset.data_set import (DataSet, load_by_guid, load_by_counter,
load_by_id)
from qcodes.dataset.database import path_to_dbfile
from qcodes.dataset.database_extract_runs import extract_runs_into_db
from qcodes.tests.dataset.temporary_databases import two_empty_temp_db_connections
from qcodes.tests.dataset.test_descriptions import some_paramspecs
from qcodes.tests.common import error_caused_by
from qcodes.dataset.measurements import Measurement
from qcodes import Station
from qcodes.tests.instrument_mocks import DummyInstrument
@contextmanager
def raise_if_file_changed(path_to_file: str):
    """
    Context manager that raises if a file is modified.
    On Windows, the OS modification time resolution is 100 ns
    """
    mtime_before = getmtime(path_to_file)
    # nothing needs cleaning up, so no try/finally is required around yield
    yield
    mtime_after = getmtime(path_to_file)
    if mtime_before != mtime_after:
        raise RuntimeError(f'File {path_to_file} was modified.')
@pytest.fixture(scope='function')
def inst():
    """
    Dummy instrument fixture. The instance is closed on teardown, which
    also removes it from the global register of instruments; leaving it
    open could break other tests.
    """
    inst = DummyInstrument('inst', gates=['back', 'plunger', 'cutter'])
    yield inst
    # teardown: close and deregister the instrument
    inst.close()
def test_missing_runs_raises(two_empty_temp_db_connections, some_paramspecs):
    """
    Test that an error is raised if we attempt to extract a run not present in
    the source DB
    """
    source_conn, target_conn = two_empty_temp_db_connections
    experiment = Experiment(conn=source_conn)

    # populate the source with five completed runs
    created_run_ids = []
    for _ in range(5):
        ds = DataSet(conn=source_conn, exp_id=experiment.exp_id)
        created_run_ids.append(ds.run_id)
        specs = list(some_paramspecs[2].values())
        for spec in specs:
            ds.add_parameter(spec)
        for value in range(10):
            ds.add_result({spec.name: value for spec in specs})
        ds.mark_complete()

    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    # request several runs, three of which (8, 7, 8) do not exist
    requested = [1, 8, 5, 3, 2, 4, 4, 4, 7, 8]
    missing = [8, 7, 8]
    expected_err = ("Error: not all run_ids exist in the source database. "
                    "The following run(s) is/are not present: "
                    f"{missing}")
    with pytest.raises(ValueError, match=re.escape(expected_err)):
        extract_runs_into_db(source_path, target_path, *requested)
def test_basic_extraction(two_empty_temp_db_connections, some_paramspecs):
    """
    Extract a single completed run and verify the copy is identical:
    data, metadata, and the containing experiment's attributes.
    Also checks that extracting an incomplete run raises and that
    re-extraction of an already-copied run is a no-op.
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)
    # cast the loop counter to the value type each paramspec expects
    type_casters = {'numeric': float,
                    'array': (lambda x: np.array(x) if hasattr(x, '__iter__')
                              else np.array([x])),
                    'text': str}
    source_exp = Experiment(conn=source_conn)
    source_dataset = DataSet(conn=source_conn, name="basic_copy_paste_name")
    # an incomplete dataset must be rejected by the extraction
    with pytest.raises(RuntimeError) as excinfo:
        extract_runs_into_db(source_path, target_path, source_dataset.run_id)
    assert error_caused_by(excinfo, ('Dataset not completed. An incomplete '
                                     'dataset can not be copied. The '
                                     'incomplete dataset has GUID: '
                                     f'{source_dataset.guid} and run_id: '
                                     f'{source_dataset.run_id}'))
    for ps in some_paramspecs[1].values():
        source_dataset.add_parameter(ps)
    for value in range(10):
        result = {ps.name: type_casters[ps.type](value)
                  for ps in some_paramspecs[1].values()}
        source_dataset.add_result(result)
    source_dataset.add_metadata('goodness', 'fair')
    source_dataset.add_metadata('test', True)
    source_dataset.mark_complete()
    extract_runs_into_db(source_path, target_path, source_dataset.run_id)
    target_exp = Experiment(conn=target_conn, exp_id=1)
    length1 = len(target_exp)
    assert length1 == 1
    # trying to insert the same run again should be a NOOP
    with raise_if_file_changed(target_path):
        extract_runs_into_db(source_path, target_path, source_dataset.run_id)
    assert len(target_exp) == length1
    target_dataset = DataSet(conn=target_conn, run_id=1)
    # Now make the interesting comparisons: are the target objects the same as
    # the source objects?
    assert source_dataset.the_same_dataset_as(target_dataset)
    source_data = source_dataset.get_data(*source_dataset.parameters.split(','))
    target_data = target_dataset.get_data(*target_dataset.parameters.split(','))
    assert source_data == target_data
    # the experiment metadata must survive the copy as well
    exp_attrs = ['name', 'sample_name', 'format_string', 'started_at',
                 'finished_at']
    for exp_attr in exp_attrs:
        assert getattr(source_exp, exp_attr) == getattr(target_exp, exp_attr)
    # trying to insert the same run again should be a NOOP
    with raise_if_file_changed(target_path):
        extract_runs_into_db(source_path, target_path, source_dataset.run_id)
def test_correct_experiment_routing(two_empty_temp_db_connections,
                                    some_paramspecs):
    """
    Test that existing experiments are correctly identified AND that multiple
    insertions of the same runs don't matter (run insertion is idempotent)
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_exp_1 = Experiment(conn=source_conn)
    # make 5 runs in first experiment
    exp_1_run_ids = []
    for _ in range(5):
        source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
        exp_1_run_ids.append(source_dataset.run_id)
        for ps in some_paramspecs[2].values():
            source_dataset.add_parameter(ps)
        for val in range(10):
            source_dataset.add_result({ps.name: val
                                       for ps in some_paramspecs[2].values()})
        source_dataset.mark_complete()
    # make a new experiment with 1 run
    source_exp_2 = Experiment(conn=source_conn)
    ds = DataSet(conn=source_conn, exp_id=source_exp_2.exp_id, name="lala")
    exp_2_run_ids = [ds.run_id]
    for ps in some_paramspecs[2].values():
        ds.add_parameter(ps)
    for val in range(10):
        ds.add_result({ps.name: val for ps in some_paramspecs[2].values()})
    ds.mark_complete()
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)
    # now copy 2 runs
    extract_runs_into_db(source_path, target_path, *exp_1_run_ids[:2])
    target_exp1 = Experiment(conn=target_conn, exp_id=1)
    assert len(target_exp1) == 2
    # copy two other runs, one of them already in
    extract_runs_into_db(source_path, target_path, *exp_1_run_ids[1:3])
    assert len(target_exp1) == 3
    # insert run from different experiment
    extract_runs_into_db(source_path, target_path, ds.run_id)
    # the run from experiment 2 must not land in target experiment 1
    assert len(target_exp1) == 3
    target_exp2 = Experiment(conn=target_conn, exp_id=2)
    assert len(target_exp2) == 1
    # finally insert every single run from experiment 1
    extract_runs_into_db(source_path, target_path, *exp_1_run_ids)
    # check for idempotency once more by inserting all the runs but in another
    # order
    with raise_if_file_changed(target_path):
        extract_runs_into_db(source_path, target_path, *exp_1_run_ids[::-1])
    target_exps = get_experiments(target_conn)
    assert len(target_exps) == 2
    assert len(target_exp1) == 5
    assert len(target_exp2) == 1
    # check that all the datasets match up
    for run_id in exp_1_run_ids + exp_2_run_ids:
        source_ds = DataSet(conn=source_conn, run_id=run_id)
        target_ds = load_by_guid(guid=source_ds.guid, conn=target_conn)
        assert source_ds.the_same_dataset_as(target_ds)
        source_data = source_ds.get_data(*source_ds.parameters.split(','))
        target_data = target_ds.get_data(*target_ds.parameters.split(','))
        assert source_data == target_data
def test_runs_from_different_experiments_raises(two_empty_temp_db_connections,
                                                some_paramspecs):
    """
    Test that inserting runs from multiple experiments raises
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)
    source_exp_1 = Experiment(conn=source_conn)
    source_exp_2 = Experiment(conn=source_conn)
    # make 5 runs in first experiment
    exp_1_run_ids = []
    for _ in range(5):
        source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
        exp_1_run_ids.append(source_dataset.run_id)
        for ps in some_paramspecs[2].values():
            source_dataset.add_parameter(ps)
        for val in range(10):
            source_dataset.add_result({ps.name: val
                                       for ps in some_paramspecs[2].values()})
        source_dataset.mark_complete()
    # make 5 runs in second experiment
    exp_2_run_ids = []
    for _ in range(5):
        source_dataset = DataSet(conn=source_conn, exp_id=source_exp_2.exp_id)
        exp_2_run_ids.append(source_dataset.run_id)
        for ps in some_paramspecs[2].values():
            source_dataset.add_parameter(ps)
        for val in range(10):
            source_dataset.add_result({ps.name: val
                                       for ps in some_paramspecs[2].values()})
        source_dataset.mark_complete()
    run_ids = exp_1_run_ids + exp_2_run_ids
    # np.unique mirrors the formatting used in the error message itself
    source_exp_ids = np.unique([1, 2])
    matchstring = ('Did not receive runs from a single experiment\\. '
                   f'Got runs from experiments {source_exp_ids}')
    # make the matchstring safe to use as a regexp
    matchstring = matchstring.replace('[', '\\[').replace(']', '\\]')
    with pytest.raises(ValueError, match=matchstring):
        extract_runs_into_db(source_path, target_path, *run_ids)
def test_extracting_dataless_run(two_empty_temp_db_connections,
                                 some_paramspecs):
    """
    Although contrived, it could happen that a run with no data is extracted
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)

    # an experiment must exist before a DataSet can be created
    Experiment(conn=source_conn)
    empty_ds = DataSet(conn=source_conn)
    empty_ds.mark_complete()

    extract_runs_into_db(source_path, target_path, empty_ds.run_id)

    extracted_ds = DataSet(conn=target_conn, run_id=1)
    assert extracted_ds.the_same_dataset_as(empty_ds)
def test_result_table_naming_and_run_id(two_empty_temp_db_connections,
                                        some_paramspecs):
    """
    Check that a correct result table name is given and that a correct run_id
    is assigned
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)
    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_1_1.add_parameter(ps)
    source_ds_1_1.add_result({ps.name: 0.0
                              for ps in some_paramspecs[2].values()})
    source_ds_1_1.mark_complete()
    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_2_1.add_parameter(ps)
    source_ds_2_1.add_result({ps.name: 0.0
                              for ps in some_paramspecs[2].values()})
    source_ds_2_1.mark_complete()
    # this run has a custom name; its table name in the target must be
    # rebuilt from that name plus the *target* exp_id and counter
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")
    for ps in some_paramspecs[2].values():
        source_ds_2_2.add_parameter(ps)
    source_ds_2_2.add_result({ps.name: 0.0
                              for ps in some_paramspecs[2].values()})
    source_ds_2_2.mark_complete()
    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)
    # The target ds ought to have a runs table "customname-1-1"
    # and ought to be the same dataset as its "ancestor"
    target_ds = DataSet(conn=target_conn, run_id=1)
    assert target_ds.table_name == "customname-1-1"
    assert target_ds.the_same_dataset_as(source_ds_2_2)
def test_load_by_X_functions(two_empty_temp_db_connections,
                             some_paramspecs):
    """
    Test some different loading functions
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)
    source_exp1 = Experiment(conn=source_conn)
    source_ds_1_1 = DataSet(conn=source_conn, exp_id=source_exp1.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_1_1.add_parameter(ps)
    source_ds_1_1.add_result({ps.name: 0.0
                              for ps in some_paramspecs[2].values()})
    source_ds_1_1.mark_complete()
    source_exp2 = Experiment(conn=source_conn)
    source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds_2_1.add_parameter(ps)
    source_ds_2_1.add_result({ps.name: 0.0
                              for ps in some_paramspecs[2].values()})
    source_ds_2_1.mark_complete()
    source_ds_2_2 = DataSet(conn=source_conn,
                            exp_id=source_exp2.exp_id,
                            name="customname")
    for ps in some_paramspecs[2].values():
        source_ds_2_2.add_parameter(ps)
    source_ds_2_2.add_result({ps.name: 0.0
                              for ps in some_paramspecs[2].values()})
    source_ds_2_2.mark_complete()
    # only the last run is extracted; in the target it is run_id 1,
    # counter 1 of experiment 1
    extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id)
    test_ds = load_by_guid(source_ds_2_2.guid, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
    test_ds = load_by_id(1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
    test_ds = load_by_counter(1, 1, target_conn)
    assert source_ds_2_2.the_same_dataset_as(test_ds)
def test_old_versions_not_touched(two_empty_temp_db_connections,
                                  some_paramspecs):
    """
    Extraction must refuse to read from or write to a version-2 DB file,
    emit a warning telling the user how to upgrade, and leave the old
    file untouched.
    """
    source_conn, target_conn = two_empty_temp_db_connections
    target_path = path_to_dbfile(target_conn)
    source_path = path_to_dbfile(source_conn)
    # locate the pre-generated version-2 fixture DB next to the test package
    fixturepath = os.sep.join(qcodes.tests.dataset.__file__.split(os.sep)[:-1])
    fixturepath = os.path.join(fixturepath,
                               'fixtures', 'db_files', 'version2',
                               'some_runs.db')
    if not os.path.exists(fixturepath):
        pytest.skip("No db-file fixtures found. You can generate test db-files"
                    " using the scripts in the legacy_DB_generation folder")
    # First test that we can not use an old version as source
    with raise_if_file_changed(fixturepath):
        with pytest.warns(UserWarning) as warning:
            extract_runs_into_db(fixturepath, target_path, 1)
            expected_mssg = ('Source DB version is 2, but this '
                             'function needs it to be in version 3. '
                             'Run this function again with '
                             'upgrade_source_db=True to auto-upgrade '
                             'the source DB file.')
            assert warning[0].message.args[0] == expected_mssg
    # Then test that we can not use an old version as target
    # first create a run in the new version source
    source_exp = Experiment(conn=source_conn)
    source_ds = DataSet(conn=source_conn, exp_id=source_exp.exp_id)
    for ps in some_paramspecs[2].values():
        source_ds.add_parameter(ps)
    source_ds.add_result({ps.name: 0.0
                          for ps in some_paramspecs[2].values()})
    source_ds.mark_complete()
    with raise_if_file_changed(fixturepath):
        with pytest.warns(UserWarning) as warning:
            extract_runs_into_db(source_path, fixturepath, 1)
            expected_mssg = ('Target DB version is 2, but this '
                             'function needs it to be in version 3. '
                             'Run this function again with '
                             'upgrade_target_db=True to auto-upgrade '
                             'the target DB file.')
            assert warning[0].message.args[0] == expected_mssg
def test_experiments_with_NULL_sample_name(two_empty_temp_db_connections,
                                           some_paramspecs):
    """
    In older API versions (corresponding to DB version 3),
    users could get away with setting the sample name to None
    This test checks that such an experiment gets correctly recognised and
    is thus not ever re-inserted into the target DB
    """
    source_conn, target_conn = two_empty_temp_db_connections
    source_exp_1 = Experiment(conn=source_conn, name='null_sample_name')
    source_path = path_to_dbfile(source_conn)
    target_path = path_to_dbfile(target_conn)
    # make 5 runs in experiment
    exp_1_run_ids = []
    for _ in range(5):
        source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
        exp_1_run_ids.append(source_dataset.run_id)
        for ps in some_paramspecs[2].values():
            source_dataset.add_parameter(ps)
        for val in range(10):
            source_dataset.add_result({ps.name: val
                                       for ps in some_paramspecs[2].values()})
        source_dataset.mark_complete()
    # emulate the legacy situation by NULL-ing the sample name directly in SQL
    sql = """
          UPDATE experiments
          SET sample_name = NULL
          WHERE exp_id = 1
          """
    source_conn.execute(sql)
    source_conn.commit()
    assert source_exp_1.sample_name is None
    extract_runs_into_db(source_path, target_path, 1, 2, 3, 4, 5)
    assert len(get_experiments(target_conn)) == 1
    # a second extraction must recognise the existing experiment
    extract_runs_into_db(source_path, target_path, 1, 2, 3, 4, 5)
    assert len(get_experiments(target_conn)) == 1
    assert len(Experiment(exp_id=1, conn=target_conn)) == 5
def test_integration_station_and_measurement(two_empty_temp_db_connections,
                                             inst):
    """
    Integration test: runs in the source DB file are produced through the
    Measurement context with a Station attached, then extracted into the
    target DB and compared against the original dataset.
    """
    source_conn, target_conn = two_empty_temp_db_connections
    src_db = path_to_dbfile(source_conn)
    tgt_db = path_to_dbfile(target_conn)
    source_exp = Experiment(conn=source_conn)

    # Measurement scenario: cutter is measured against back and plunger
    station = Station(inst)
    meas = Measurement(exp=source_exp, station=station)
    meas.register_parameter(inst.back)
    meas.register_parameter(inst.plunger)
    meas.register_parameter(inst.cutter, setpoints=(inst.back, inst.plunger))

    with meas.run() as datasaver:
        for bv in [1, 2, 3]:
            for pv in [-3, -2.5, 0]:
                datasaver.add_result((inst.back, bv),
                                     (inst.plunger, pv),
                                     (inst.cutter, bv + pv))

    extract_runs_into_db(src_db, tgt_db, 1)

    extracted_ds = DataSet(conn=target_conn, run_id=1)
    assert datasaver.dataset.the_same_dataset_as(extracted_ds)
def test_atomicity(two_empty_temp_db_connections, some_paramspecs):
    """
    Extracting two runs where the second one is incomplete must raise and
    roll back the whole transaction, leaving the target DB file untouched.
    """
    source_conn, target_conn = two_empty_temp_db_connections
    src_db = path_to_dbfile(source_conn)
    tgt_db = path_to_dbfile(target_conn)

    # The target file must exist beforehand so we can detect modifications
    Path(tgt_db).touch()

    source_exp = Experiment(conn=source_conn)
    paramspecs = list(some_paramspecs[2].values())

    finished_ds = DataSet(conn=source_conn, exp_id=source_exp.exp_id)
    for spec in paramspecs:
        finished_ds.add_parameter(spec)
    finished_ds.add_result({spec.name: 2.1 for spec in paramspecs})
    finished_ds.mark_complete()

    unfinished_ds = DataSet(conn=source_conn, exp_id=source_exp.exp_id)
    for spec in paramspecs:
        unfinished_ds.add_parameter(spec)
    unfinished_ds.add_result({spec.name: 2.1 for spec in paramspecs})
    # NB: this second dataset is deliberately NOT marked as completed

    # The target file must come out of the failed extraction unchanged
    with raise_if_file_changed(tgt_db):
        # The "not completed" ValueError surfaces as a RuntimeError
        # through the SQLite transaction machinery
        with pytest.raises(RuntimeError):
            extract_runs_into_db(src_db, tgt_db, 1, 2)
def test_column_mismatch(two_empty_temp_db_connections, some_paramspecs, inst):
    """
    Inserting a run that has neither metadata nor a snapshot into a target
    DB that already holds a run with both must still succeed.
    """
    source_conn, target_conn = two_empty_temp_db_connections
    src_db = path_to_dbfile(source_conn)
    tgt_db = path_to_dbfile(target_conn)

    # First place a "rich" run (snapshot + metadata) into the target DB
    target_exp = Experiment(conn=target_conn)
    station = Station(inst)
    meas = Measurement(exp=target_exp, station=station)
    meas.register_parameter(inst.back)
    meas.register_parameter(inst.plunger)
    meas.register_parameter(inst.cutter, setpoints=(inst.back, inst.plunger))
    with meas.run() as datasaver:
        for bv in [1, 2, 3]:
            for pv in [-3, -2.5, 0]:
                datasaver.add_result((inst.back, bv),
                                     (inst.plunger, pv),
                                     (inst.cutter, bv + pv))
    datasaver.dataset.add_metadata('meta_tag', 'meta_value')

    # Then create a bare run in the source: no station, no metadata
    Experiment(conn=source_conn)
    source_ds = DataSet(conn=source_conn)
    for spec in some_paramspecs[2].values():
        source_ds.add_parameter(spec)
    source_ds.add_result({spec.name: 2.1
                          for spec in some_paramspecs[2].values()})
    source_ds.mark_complete()

    extract_runs_into_db(src_db, tgt_db, 1)

    # The extracted copy lands as run_id 2 (run 1 is the rich target run)
    copied_ds = DataSet(conn=target_conn, run_id=2)
    assert copied_ds.the_same_dataset_as(source_ds)
| [
"re.escape",
"qcodes.tests.instrument_mocks.DummyInstrument",
"numpy.array",
"qcodes.dataset.data_set.DataSet",
"qcodes.dataset.data_set.load_by_id",
"pytest.fixture",
"qcodes.dataset.experiment_container.Experiment",
"qcodes.dataset.database_extract_runs.extract_runs_into_db",
"os.path.exists",
"... | [((1428, 1460), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1442, 1460), False, 'import pytest\n'), ((1093, 1115), 'os.path.getmtime', 'getmtime', (['path_to_file'], {}), '(path_to_file)\n', (1101, 1115), False, 'from os.path import getmtime\n'), ((1287, 1309), 'os.path.getmtime', 'getmtime', (['path_to_file'], {}), '(path_to_file)\n', (1295, 1309), False, 'from os.path import getmtime\n'), ((1678, 1738), 'qcodes.tests.instrument_mocks.DummyInstrument', 'DummyInstrument', (['"""inst"""'], {'gates': "['back', 'plunger', 'cutter']"}), "('inst', gates=['back', 'plunger', 'cutter'])\n", (1693, 1738), False, 'from qcodes.tests.instrument_mocks import DummyInstrument\n'), ((2045, 2073), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (2055, 2073), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((2605, 2632), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (2619, 2632), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((2651, 2678), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (2665, 2678), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((3218, 3245), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (3232, 3245), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((3264, 3291), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (3278, 3291), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((3511, 3539), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (3521, 3539), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((3561, 3616), 
'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'name': '"""basic_copy_paste_name"""'}), "(conn=source_conn, name='basic_copy_paste_name')\n", (3568, 3616), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((3757, 3949), 'qcodes.tests.common.error_caused_by', 'error_caused_by', (['excinfo', 'f"""Dataset not completed. An incomplete dataset can not be copied. The incomplete dataset has GUID: {source_dataset.guid} and run_id: {source_dataset.run_id}"""'], {}), "(excinfo,\n f'Dataset not completed. An incomplete dataset can not be copied. The incomplete dataset has GUID: {source_dataset.guid} and run_id: {source_dataset.run_id}'\n )\n", (3772, 3949), False, 'from qcodes.tests.common import error_caused_by\n'), ((4513, 4582), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_dataset.run_id'], {}), '(source_path, target_path, source_dataset.run_id)\n', (4533, 4582), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((4601, 4639), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'target_conn', 'exp_id': '(1)'}), '(conn=target_conn, exp_id=1)\n', (4611, 4639), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((4939, 4974), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'target_conn', 'run_id': '(1)'}), '(conn=target_conn, run_id=1)\n', (4946, 4974), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((6116, 6144), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (6126, 6144), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((6717, 6745), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (6727, 6745), False, 'from 
qcodes.dataset.experiment_container import Experiment\n'), ((6755, 6821), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp_2.exp_id', 'name': '"""lala"""'}), "(conn=source_conn, exp_id=source_exp_2.exp_id, name='lala')\n", (6762, 6821), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((7073, 7100), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (7087, 7100), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((7119, 7146), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (7133, 7146), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((7174, 7240), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '*exp_1_run_ids[:2]'], {}), '(source_path, target_path, *exp_1_run_ids[:2])\n', (7194, 7240), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((7260, 7298), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'target_conn', 'exp_id': '(1)'}), '(conn=target_conn, exp_id=1)\n', (7270, 7298), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((7388, 7455), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '*exp_1_run_ids[1:3]'], {}), '(source_path, target_path, *exp_1_run_ids[1:3])\n', (7408, 7455), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((7538, 7595), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'ds.run_id'], {}), '(source_path, target_path, ds.run_id)\n', (7558, 7595), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((7649, 7687), 'qcodes.dataset.experiment_container.Experiment', 
'Experiment', ([], {'conn': 'target_conn', 'exp_id': '(2)'}), '(conn=target_conn, exp_id=2)\n', (7659, 7687), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((7784, 7846), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '*exp_1_run_ids'], {}), '(source_path, target_path, *exp_1_run_ids)\n', (7804, 7846), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((8080, 8108), 'qcodes.dataset.sqlite_base.get_experiments', 'get_experiments', (['target_conn'], {}), '(target_conn)\n', (8095, 8108), False, 'from qcodes.dataset.sqlite_base import get_experiments\n'), ((8991, 9018), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (9005, 9018), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((9037, 9064), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (9051, 9064), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((9085, 9113), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (9095, 9113), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((9133, 9161), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (9143, 9161), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((10253, 10270), 'numpy.unique', 'np.unique', (['[1, 2]'], {}), '([1, 2])\n', (10262, 10270), True, 'import numpy as np\n'), ((10939, 10966), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (10953, 10966), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((10985, 11012), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (10999, 11012), False, 'from 
qcodes.dataset.database import path_to_dbfile\n'), ((11018, 11046), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (11028, 11046), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((11064, 11089), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (11071, 11089), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((11126, 11190), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_ds.run_id'], {}), '(source_path, target_path, source_ds.run_id)\n', (11146, 11190), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((11208, 11243), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'target_conn', 'run_id': '(1)'}), '(conn=target_conn, run_id=1)\n', (11215, 11243), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((11618, 11645), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (11632, 11645), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((11664, 11691), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (11678, 11691), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((11711, 11739), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (11721, 11739), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((11760, 11812), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp1.exp_id'}), '(conn=source_conn, exp_id=source_exp1.exp_id)\n', (11767, 11812), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), 
((12062, 12090), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (12072, 12090), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((12111, 12163), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp2.exp_id'}), '(conn=source_conn, exp_id=source_exp2.exp_id)\n', (12118, 12163), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((12414, 12485), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp2.exp_id', 'name': '"""customname"""'}), "(conn=source_conn, exp_id=source_exp2.exp_id, name='customname')\n", (12421, 12485), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((12777, 12845), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_ds_2_2.run_id'], {}), '(source_path, target_path, source_ds_2_2.run_id)\n', (12797, 12845), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((12984, 13019), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'target_conn', 'run_id': '(1)'}), '(conn=target_conn, run_id=1)\n', (12991, 13019), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((13376, 13403), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (13390, 13403), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((13422, 13449), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (13436, 13449), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((13469, 13497), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (13479, 13497), False, 'from 
qcodes.dataset.experiment_container import Experiment\n'), ((13518, 13570), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp1.exp_id'}), '(conn=source_conn, exp_id=source_exp1.exp_id)\n', (13525, 13570), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((13820, 13848), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (13830, 13848), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((13869, 13921), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp2.exp_id'}), '(conn=source_conn, exp_id=source_exp2.exp_id)\n', (13876, 13921), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((14172, 14243), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp2.exp_id', 'name': '"""customname"""'}), "(conn=source_conn, exp_id=source_exp2.exp_id, name='customname')\n", (14179, 14243), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((14535, 14603), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_ds_2_2.run_id'], {}), '(source_path, target_path, source_ds_2_2.run_id)\n', (14555, 14603), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((14619, 14664), 'qcodes.dataset.data_set.load_by_guid', 'load_by_guid', (['source_ds_2_2.guid', 'target_conn'], {}), '(source_ds_2_2.guid, target_conn)\n', (14631, 14664), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((14734, 14760), 'qcodes.dataset.data_set.load_by_id', 'load_by_id', (['(1)', 'target_conn'], {}), '(1, target_conn)\n', (14744, 14760), False, 'from qcodes.dataset.data_set import DataSet, 
load_by_guid, load_by_counter, load_by_id\n'), ((14830, 14864), 'qcodes.dataset.data_set.load_by_counter', 'load_by_counter', (['(1)', '(1)', 'target_conn'], {}), '(1, 1, target_conn)\n', (14845, 14864), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((15119, 15146), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (15133, 15146), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((15165, 15192), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (15179, 15192), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((15292, 15369), 'os.path.join', 'os.path.join', (['fixturepath', '"""fixtures"""', '"""db_files"""', '"""version2"""', '"""some_runs.db"""'], {}), "(fixturepath, 'fixtures', 'db_files', 'version2', 'some_runs.db')\n", (15304, 15369), False, 'import os\n'), ((16364, 16392), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (16374, 16392), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((16409, 16460), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp.exp_id'}), '(conn=source_conn, exp_id=source_exp.exp_id)\n', (16416, 16460), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((17703, 17756), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn', 'name': '"""null_sample_name"""'}), "(conn=source_conn, name='null_sample_name')\n", (17713, 17756), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((17776, 17803), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (17790, 17803), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((17822, 17849), 
'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (17836, 17849), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((18578, 18639), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(source_path, target_path, 1, 2, 3, 4, 5)\n', (18598, 18639), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((18696, 18757), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(source_path, target_path, 1, 2, 3, 4, 5)\n', (18716, 18757), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((19232, 19259), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (19246, 19259), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((19278, 19305), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (19292, 19305), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((19324, 19352), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (19334, 19352), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((19402, 19415), 'qcodes.Station', 'Station', (['inst'], {}), '(inst)\n', (19409, 19415), False, 'from qcodes import Station\n'), ((19428, 19472), 'qcodes.dataset.measurements.Measurement', 'Measurement', ([], {'exp': 'source_exp', 'station': 'station'}), '(exp=source_exp, station=station)\n', (19439, 19472), False, 'from qcodes.dataset.measurements import Measurement\n'), ((19935, 19984), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '(1)'], {}), '(source_path, target_path, 
1)\n', (19955, 19984), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((20002, 20037), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'target_conn', 'run_id': '(1)'}), '(conn=target_conn, run_id=1)\n', (20009, 20037), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((20456, 20483), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (20470, 20483), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((20502, 20529), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (20516, 20529), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((20666, 20694), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (20676, 20694), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((20713, 20764), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp.exp_id'}), '(conn=source_conn, exp_id=source_exp.exp_id)\n', (20720, 20764), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((21006, 21057), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp.exp_id'}), '(conn=source_conn, exp_id=source_exp.exp_id)\n', (21013, 21057), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((21894, 21921), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['source_conn'], {}), '(source_conn)\n', (21908, 21921), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((21940, 21967), 'qcodes.dataset.database.path_to_dbfile', 'path_to_dbfile', (['target_conn'], {}), '(target_conn)\n', (21954, 21967), False, 'from qcodes.dataset.database import path_to_dbfile\n'), ((21986, 22014), 
'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'target_conn'}), '(conn=target_conn)\n', (21996, 22014), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((22064, 22077), 'qcodes.Station', 'Station', (['inst'], {}), '(inst)\n', (22071, 22077), False, 'from qcodes import Station\n'), ((22090, 22134), 'qcodes.dataset.measurements.Measurement', 'Measurement', ([], {'exp': 'target_exp', 'station': 'station'}), '(exp=target_exp, station=station)\n', (22101, 22134), False, 'from qcodes.dataset.measurements import Measurement\n'), ((22658, 22686), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (22668, 22686), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((22703, 22728), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn'}), '(conn=source_conn)\n', (22710, 22728), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((22948, 22997), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '(1)'], {}), '(source_path, target_path, 1)\n', (22968, 22997), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((23036, 23071), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'target_conn', 'run_id': '(2)'}), '(conn=target_conn, run_id=2)\n', (23043, 23071), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((2186, 2239), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp_1.exp_id'}), '(conn=source_conn, exp_id=source_exp_1.exp_id)\n', (2193, 2239), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((3004, 3060), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 
'target_path', '*run_ids'], {}), '(source_path, target_path, *run_ids)\n', (3024, 3060), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((3627, 3654), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3640, 3654), False, 'import pytest\n'), ((3675, 3744), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_dataset.run_id'], {}), '(source_path, target_path, source_dataset.run_id)\n', (3695, 3744), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((4808, 4877), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_dataset.run_id'], {}), '(source_path, target_path, source_dataset.run_id)\n', (4828, 4877), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((5673, 5742), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', 'source_dataset.run_id'], {}), '(source_path, target_path, source_dataset.run_id)\n', (5693, 5742), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((6257, 6310), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp_1.exp_id'}), '(conn=source_conn, exp_id=source_exp_1.exp_id)\n', (6264, 6310), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((7992, 8060), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '*exp_1_run_ids[::-1]'], {}), '(source_path, target_path, *exp_1_run_ids[::-1])\n', (8012, 8060), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((8322, 8362), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'run_id': 'run_id'}), '(conn=source_conn, 
run_id=run_id)\n', (8329, 8362), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((8383, 8434), 'qcodes.dataset.data_set.load_by_guid', 'load_by_guid', ([], {'guid': 'source_ds.guid', 'conn': 'target_conn'}), '(guid=source_ds.guid, conn=target_conn)\n', (8395, 8434), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((9274, 9327), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp_1.exp_id'}), '(conn=source_conn, exp_id=source_exp_1.exp_id)\n', (9281, 9327), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((9787, 9840), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp_2.exp_id'}), '(conn=source_conn, exp_id=source_exp_2.exp_id)\n', (9794, 9840), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((10538, 10582), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'matchstring'}), '(ValueError, match=matchstring)\n', (10551, 10582), False, 'import pytest\n'), ((10592, 10648), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '*run_ids'], {}), '(source_path, target_path, *run_ids)\n', (10612, 10648), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((15443, 15470), 'os.path.exists', 'os.path.exists', (['fixturepath'], {}), '(fixturepath)\n', (15457, 15470), False, 'import os\n'), ((15480, 15615), 'pytest.skip', 'pytest.skip', (['"""No db-file fixtures found. You can generate test db-files using the scripts in the legacy_DB_generation folder"""'], {}), "(\n 'No db-file fixtures found. 
You can generate test db-files using the scripts in the legacy_DB_generation folder'\n )\n", (15491, 15615), False, 'import pytest\n'), ((17956, 18009), 'qcodes.dataset.data_set.DataSet', 'DataSet', ([], {'conn': 'source_conn', 'exp_id': 'source_exp_1.exp_id'}), '(conn=source_conn, exp_id=source_exp_1.exp_id)\n', (17963, 18009), False, 'from qcodes.dataset.data_set import DataSet, load_by_guid, load_by_counter, load_by_id\n'), ((15751, 15776), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (15763, 15776), False, 'import pytest\n'), ((15801, 15850), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['fixturepath', 'target_path', '(1)'], {}), '(fixturepath, target_path, 1)\n', (15821, 15850), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((16739, 16764), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (16751, 16764), False, 'import pytest\n'), ((16789, 16838), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'fixturepath', '(1)'], {}), '(source_path, fixturepath, 1)\n', (16809, 16838), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((18656, 18684), 'qcodes.dataset.sqlite_base.get_experiments', 'get_experiments', (['target_conn'], {}), '(target_conn)\n', (18671, 18684), False, 'from qcodes.dataset.sqlite_base import get_experiments\n'), ((18774, 18802), 'qcodes.dataset.sqlite_base.get_experiments', 'get_experiments', (['target_conn'], {}), '(target_conn)\n', (18789, 18802), False, 'from qcodes.dataset.sqlite_base import get_experiments\n'), ((18825, 18863), 'qcodes.dataset.experiment_container.Experiment', 'Experiment', ([], {'exp_id': '(1)', 'conn': 'target_conn'}), '(exp_id=1, conn=target_conn)\n', (18835, 18863), False, 'from qcodes.dataset.experiment_container import Experiment\n'), ((20622, 20639), 'pathlib.Path', 'Path', (['target_path'], {}), 
'(target_path)\n', (20626, 20639), False, 'from pathlib import Path\n'), ((21509, 21536), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (21522, 21536), False, 'import pytest\n'), ((21550, 21602), 'qcodes.dataset.database_extract_runs.extract_runs_into_db', 'extract_runs_into_db', (['source_path', 'target_path', '(1)', '(2)'], {}), '(source_path, target_path, 1, 2)\n', (21570, 21602), False, 'from qcodes.dataset.database_extract_runs import extract_runs_into_db\n'), ((2970, 2993), 're.escape', 're.escape', (['expected_err'], {}), '(expected_err)\n', (2979, 2993), False, 'import re\n'), ((3371, 3382), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3379, 3382), True, 'import numpy as np\n'), ((3444, 3457), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (3452, 3457), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _vta-mat-mult-opt:
Matrix Multiply Blocking
========================
**Author**: `<NAME> <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an overview on how to use TVM to map matrix
multiplication efficiently on the VTA design.
We recommend covering the :ref:`basic-mat-mult` tutorial first.
In this tutorial, we will demonstrate TVM schedule optimizations to break large
neural network operators down onto smaller blocks to achieve computation within
limited hardware accelerator resources.
"""
######################################################################
# RPC Setup
# ---------
# We start by programming the Pynq's FPGA and building its RPC runtime.
from __future__ import absolute_import, print_function
import os
import tvm
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the vta/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
# (defaults: board reachable at 192.168.2.99, RPC server listening on 9091)
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq":
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    remote = rpc.connect(host, port)
    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)
    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)
# In simulation mode, host the RPC server locally.
elif env.TARGET in ["sim", "tsim"]:
    remote = rpc.LocalSession()
######################################################################
# Computation Declaration
# -----------------------
# As a first step, we need to describe our matrix multiplication computation.
# We define the matrix multiplication as the computation one would find in a
# fully connected layer, defined by its batch size, input channels, and output
# channels.
# These have to be integer multiples of the VTA tensor shape:
# :code:`BATCH`, :code:`BLOCK_IN`, and :code:`BLOCK_OUT` respectively.
#
# We've added extra operators to the matrix multiplication that apply
# shifting and clipping to the output in order to mimic a fixed-point
# matrix multiplication followed by a rectified linear activation.
# We describe the TVM dataflow graph of the fully connected layer below:
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/fc_dataflow.png
# :align: center
#
# This computation is intentionally too large to fit onto VTA's on-chip
# buffers all at once. Therefore in the scheduling phase we'll
# rely on computation blocking strategies to break the computation down into
# manageable chunks.
# Fully connected layer dimensions: 1024 x 1024
batch_size = 1
in_channels = 1024
out_channels = 1024
# All dimensions must tile evenly into the VTA tensor core shape.
assert batch_size % env.BATCH == 0
assert in_channels % env.BLOCK_IN == 0
assert out_channels % env.BLOCK_OUT == 0
# Let's derive the tiled input tensor shapes
data_shape = (batch_size // env.BATCH,
              in_channels // env.BLOCK_IN,
              env.BATCH,
              env.BLOCK_IN)
weight_shape = (out_channels // env.BLOCK_OUT,
                in_channels // env.BLOCK_IN,
                env.BLOCK_OUT,
                env.BLOCK_IN)
output_shape = (batch_size // env.BATCH,
                out_channels // env.BLOCK_OUT,
                env.BATCH,
                env.BLOCK_OUT)
# 2 ops per MAC (multiply + accumulate); not referenced again in this script.
num_ops = in_channels * out_channels * batch_size * 2
# Reduction axes
ic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name='ic')
ic_tns = te.reduce_axis((0, env.BLOCK_IN), name='ic_tns')
# Input placeholder tensors
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
weight = te.placeholder(weight_shape, name="weight", dtype=env.wgt_dtype)
# Copy buffers
data_buf = te.compute(data_shape,
                      lambda *i: data(*i),
                      "data_buf")
weight_buf = te.compute(weight_shape,
                        lambda *i: weight(*i),
                        "weight_buf")
# Declare matrix multiply computation
# NOTE(review): the tensor name "res_gem" looks like a typo for "res_gemm";
# kept as-is because it is a runtime identifier (cosmetic only).
res_gemm = te.compute(output_shape,
                      lambda bo, co, bi, ci: te.sum(
                            data_buf[bo, ic, bi, ic_tns].astype(env.acc_dtype) *
                            weight_buf[co, ic, ci, ic_tns].astype(env.acc_dtype),
                            axis=[ic, ic_tns]),
                      name="res_gem")
# Add shift stage for fixed-point normalization
res_shr = te.compute(output_shape,
                    lambda *i: res_gemm(*i) >> env.INP_WIDTH,
                    name="res_shr")
# Apply clipping between (0, input max value)
inp_max = (1<<(env.INP_WIDTH-1))-1
res_max = te.compute(output_shape,
                    lambda *i: tvm.te.max(res_shr(*i), 0),
                    "res_max")
res_min = te.compute(output_shape,
                    lambda *i: tvm.te.min(res_max(*i), inp_max),
                    "res_min")
# Apply typecast to input data type before sending results back
res = te.compute(output_shape,
                 lambda *i: res_min(*i).astype(env.inp_dtype),
                 name="res")
######################################################################
# Scheduling the Computation
# --------------------------
# We'll look at a set of schedule transformations necessary to map the
# matrix multiplications onto VTA in an efficient fashion.
# Those include:
#
# - Computation blocking
# - Lowering to VTA hardware intrinsics
# Create TVM schedule
s = te.create_schedule(res.op)
# Let's look at the default TVM schedule (before any blocking/lowering)
print(tvm.lower(s, [data, weight, res], simple_mode=True))
######################################################################
# Blocking the Computation
# ~~~~~~~~~~~~~~~~~~~~~~~~
# The matrix multiplication is by default too large for activations or weights
# to fit on VTA's on-chip buffers all at once.
# We block the (1, 1024) by (1024, 1024) matrix multiplication into
# smaller (1, 256) by (256, 256) matrix multiplications so the intermediate
# tensors can fit on the accelerator's on-chip SRAM.
# This approach is similar to blocking techniques applied to CPUs and GPUs in
# order to increase cache hit rate.
#
# We perform blocking along each axes (the batch axis being untouched since
# we are performing single-batch inference).
# We also leave the inner-most tensorization axes as-is in order to allow
# TVM to pattern-match tensorization.
# We show the outcome of blocking on the computation schedule in the diagram
# below:
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/blocking.png
# :align: center
# :width: 480px
#
# .. note::
#
# The code after loop splitting and reordering is equivalent to the following
# pseudo-code. We ignore the batch axis since we are only performing single-batch
# inference in this example:
#
# .. code-block:: c
#
# for (int oc_out = 0; oc_out < 4; ++oc_out) {
# // Initialization loop
# for (int oc_inn = 0; oc_inn < 16; ++oc_inn) {
# for (int oc_tns = 0; oc_tns < 16; ++oc_tns) {
# int j = (oc_out * 16 + oc_inn) * 16 + oc_tns;
# C[0][j] = 0;
# }
# }
# for (int ic_out = 0; ic_out < 4; ++ic_out) {
# // Block loop
# for (int oc_inn = 0; oc_inn < 16; ++oc_inn) {
# for (int ic_inn = 0; ic_inn < 16; ++ic_inn) {
# // Tensorization loop
# for (int oc_tns = 0; oc_tns < 16; ++oc_tns) {
# for (int ic_tns = 0; ic_tns < 16; ++ic_tns) {
# int i = (ic_out * 16 + ic_inn) * 16 + ic_tns;
# int j = (oc_out * 16 + oc_inn) * 16 + oc_tns;
# C[0][i] = C[0][i] + A[0][i] * B[j][i];
# }
# }
# }
# }
# }
# }
# }
# Let's define tiling sizes (expressed in multiples of VTA tensor shape size)
# NOTE(review): with env.BATCH == 1 this is 1; a larger env.BATCH would make
# b_block 0 via integer division — presumably single-batch is always assumed.
b_block = 1 // env.BATCH
i_block = 256 // env.BLOCK_IN
o_block = 256 // env.BLOCK_OUT
# Tile the output tensor along the batch and output channel dimensions
# (since by default we are doing single batch inference, the split along
# the batch dimension has no effect)
b, oc, b_tns, oc_tns = s[res].op.axis
b_out, b_inn = s[res].split(b, b_block)
oc_out, oc_inn = s[res].split(oc, o_block)
s[res].reorder(b_out, oc_out, b_inn, oc_inn)
# Move intermediate computation into each output compute tile
s[res_gemm].compute_at(s[res], oc_out)
s[res_shr].compute_at(s[res], oc_out)
s[res_max].compute_at(s[res], oc_out)
s[res_min].compute_at(s[res], oc_out)
# Apply additional loop split along reduction axis (input channel)
b_inn, oc_inn, b_tns, oc_tns = s[res_gemm].op.axis
ic_out, ic_inn = s[res_gemm].split(ic, i_block)
# Reorder axes. We move the ic_out axis all the way out of the GEMM
# loop to block along the reduction axis
s[res_gemm].reorder(ic_out, b_inn, oc_inn, ic_inn, b_tns, oc_tns, ic_tns)
# Let's look at the current TVM schedule after blocking
print(tvm.lower(s, [data, weight, res], simple_mode=True))
######################################################################
# Lowering Copies to DMA Transfers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Next we set the buffer scopes to the corresponding on-chip VTA SRAM buffers.
# We move the load loops into the matrix multiply computation loop to stage
# memory loads such that they fit in the on-chip SRAM buffers.
# Finally we annotate the load/store loop outer axes with the DMA copy pragma
# to perform bulk memory transfers on VTA.
# Set scope of SRAM buffers (maps each stage to the matching on-chip memory)
s[data_buf].set_scope(env.inp_scope)
s[weight_buf].set_scope(env.wgt_scope)
s[res_gemm].set_scope(env.acc_scope)
s[res_shr].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
# Block data and weight cache reads so each reduction block's inputs fit
# in SRAM before the GEMM consumes them
s[data_buf].compute_at(s[res_gemm], ic_out)
s[weight_buf].compute_at(s[res_gemm], ic_out)
# Use DMA copy pragma on DRAM->SRAM operations
s[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)
s[weight_buf].pragma(s[weight_buf].op.axis[0], env.dma_copy)
# Use DMA copy pragma on SRAM->DRAM operation
# (this implies that these copies should be performed along b_inn,
# or result axis 2)
s[res].pragma(s[res].op.axis[2], env.dma_copy)
######################################################################
# Lowering Computation to VTA Compute Intrinsics
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The last phase is to lower the computation loops down to VTA hardware
# intrinsics by mapping the matrix multiplication to tensor intrinsics,
# and mapping the shift, and clipping computation to the vector ALU.
# Apply tensorization over the batch tensor tile axis: the innermost
# (b_tns, oc_tns, ic_tns) loops are replaced by the hardware GEMM intrinsic
s[res_gemm].tensorize(b_tns, env.gemm)
# Add an ALU pragma over the shift and clipping operations
s[res_shr].pragma(s[res_shr].op.axis[0], env.alu)
s[res_min].pragma(s[res_min].op.axis[0], env.alu)
s[res_max].pragma(s[res_max].op.axis[0], env.alu)
# Let's look at the final lowered TVM schedule after lowering memory
# loads/stores down to DMA copy intrinsics, and the computation down to
# VTA compute intrinsics.
print(vta.lower(s, [data, weight, res], simple_mode=True))
######################################################################
# TVM Compilation and Verification
# --------------------------------
# After specifying the schedule, we can compile it into a TVM function.
# We save the module so we can send it over RPC.
# We run the function and verify it against a numpy implementation to
# ensure correctness.
# Compile the TVM module
my_gemm = vta.build(s, [data, weight, res], "ext_dev", env.target_host, name="my_gemm")
temp = util.tempdir()
my_gemm.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the data and weight arrays randomly in the int range [-128, 128)
# (np.random.randint's upper bound is exclusive)
data_np = np.random.randint(
    -128, 128, size=(batch_size, in_channels)).astype(data.dtype)
weight_np = np.random.randint(
    -128, 128, size=(out_channels, in_channels)).astype(weight.dtype)
# Apply packing to the data and weight arrays from a 2D to a 4D packed layout
data_packed = data_np.reshape(batch_size // env.BATCH,
                              env.BATCH,
                              in_channels // env.BLOCK_IN,
                              env.BLOCK_IN).transpose((0, 2, 1, 3))
weight_packed = weight_np.reshape(out_channels // env.BLOCK_OUT,
                                  env.BLOCK_OUT,
                                  in_channels // env.BLOCK_IN,
                                  env.BLOCK_IN).transpose((0, 2, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
data_nd = tvm.nd.array(data_packed, ctx)
weight_nd = tvm.nd.array(weight_packed, ctx)
res_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)
# Clear stats
if env.TARGET in ["sim", "tsim"]:
    simulator.clear_stats()
# Invoke the module to perform the computation
f(data_nd, weight_nd, res_nd)
# Verify against numpy implementation: full-precision GEMM, then the same
# shift / clip / cast pipeline the hardware applies
res_ref = np.dot(data_np.astype(env.acc_dtype),
                 weight_np.T.astype(env.acc_dtype))
res_ref = res_ref >> env.INP_WIDTH
res_ref = np.clip(res_ref, 0, inp_max)
res_ref = res_ref.astype(res.dtype)
res_ref = res_ref.reshape(batch_size // env.BATCH,
                          env.BATCH,
                          out_channels // env.BLOCK_OUT,
                          env.BLOCK_OUT).transpose((0, 2, 1, 3))
np.testing.assert_equal(res_ref, res_nd.asnumpy())
# Print stats
if env.TARGET in ["sim", "tsim"]:
    sim_stats = simulator.stats()
    print("Execution statistics:")
    for k, v in sim_stats.items():
        print("\t{:<16}: {:>16}".format(k, v))
print("Successful blocked matrix multiply test!")
######################################################################
# Summary
# -------
# This tutorial demonstrates how TVM scheduling primitives can achieve
# computation blocking for a matrix multiplication example.
# This allows us to map arbitrarily large computation onto limited
# hardware accelerator resources.
#
| [
"numpy.clip",
"tvm.rpc.LocalSession",
"vta.build",
"tvm.lower",
"tvm.te.reduce_axis",
"vta.program_fpga",
"vta.get_env",
"tvm.te.placeholder",
"vta.lower",
"tvm.nd.array",
"vta.testing.simulator.clear_stats",
"tvm.te.create_schedule",
"vta.testing.simulator.stats",
"vta.reconfig_runtime",
... | [((1758, 1771), 'vta.get_env', 'vta.get_env', ([], {}), '()\n', (1769, 1771), False, 'import vta\n'), ((1859, 1905), 'os.environ.get', 'os.environ.get', (['"""VTA_RPC_HOST"""', '"""192.168.2.99"""'], {}), "('VTA_RPC_HOST', '192.168.2.99')\n", (1873, 1905), False, 'import os\n'), ((4586, 4645), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, in_channels // env.BLOCK_IN)'], {'name': '"""ic"""'}), "((0, in_channels // env.BLOCK_IN), name='ic')\n", (4600, 4645), False, 'from tvm import te\n'), ((4655, 4703), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, env.BLOCK_IN)'], {'name': '"""ic_tns"""'}), "((0, env.BLOCK_IN), name='ic_tns')\n", (4669, 4703), False, 'from tvm import te\n'), ((4740, 4800), 'tvm.te.placeholder', 'te.placeholder', (['data_shape'], {'name': '"""data"""', 'dtype': 'env.inp_dtype'}), "(data_shape, name='data', dtype=env.inp_dtype)\n", (4754, 4800), False, 'from tvm import te\n'), ((4810, 4874), 'tvm.te.placeholder', 'te.placeholder', (['weight_shape'], {'name': '"""weight"""', 'dtype': 'env.wgt_dtype'}), "(weight_shape, name='weight', dtype=env.wgt_dtype)\n", (4824, 4874), False, 'from tvm import te\n'), ((6600, 6626), 'tvm.te.create_schedule', 'te.create_schedule', (['res.op'], {}), '(res.op)\n', (6618, 6626), False, 'from tvm import te\n'), ((12593, 12670), 'vta.build', 'vta.build', (['s', '[data, weight, res]', '"""ext_dev"""', 'env.target_host'], {'name': '"""my_gemm"""'}), "(s, [data, weight, res], 'ext_dev', env.target_host, name='my_gemm')\n", (12602, 12670), False, 'import vta\n'), ((12678, 12692), 'tvm.contrib.util.tempdir', 'util.tempdir', ([], {}), '()\n', (12690, 12692), False, 'from tvm.contrib import util\n'), ((13772, 13802), 'tvm.nd.array', 'tvm.nd.array', (['data_packed', 'ctx'], {}), '(data_packed, ctx)\n', (13784, 13802), False, 'import tvm\n'), ((13815, 13847), 'tvm.nd.array', 'tvm.nd.array', (['weight_packed', 'ctx'], {}), '(weight_packed, ctx)\n', (13827, 13847), False, 'import tvm\n'), ((14256, 14284), 'numpy.clip', 
'np.clip', (['res_ref', '(0)', 'inp_max'], {}), '(res_ref, 0, inp_max)\n', (14263, 14284), True, 'import numpy as np\n'), ((1917, 1955), 'os.environ.get', 'os.environ.get', (['"""VTA_RPC_PORT"""', '"""9091"""'], {}), "('VTA_RPC_PORT', '9091')\n", (1931, 1955), False, 'import os\n'), ((2185, 2211), 'tvm.runtime.enabled', 'tvm.runtime.enabled', (['"""rpc"""'], {}), "('rpc')\n", (2204, 2211), False, 'import tvm\n'), ((2225, 2248), 'tvm.rpc.connect', 'rpc.connect', (['host', 'port'], {}), '(host, port)\n', (2236, 2248), False, 'from tvm import rpc\n'), ((2288, 2316), 'vta.reconfig_runtime', 'vta.reconfig_runtime', (['remote'], {}), '(remote)\n', (2308, 2316), False, 'import vta\n'), ((2507, 2547), 'vta.program_fpga', 'vta.program_fpga', (['remote'], {'bitstream': 'None'}), '(remote, bitstream=None)\n', (2523, 2547), False, 'import vta\n'), ((6674, 6725), 'tvm.lower', 'tvm.lower', (['s', '[data, weight, res]'], {'simple_mode': '(True)'}), '(s, [data, weight, res], simple_mode=True)\n', (6683, 6725), False, 'import tvm\n'), ((10030, 10081), 'tvm.lower', 'tvm.lower', (['s', '[data, weight, res]'], {'simple_mode': '(True)'}), '(s, [data, weight, res], simple_mode=True)\n', (10039, 10081), False, 'import tvm\n'), ((12149, 12200), 'vta.lower', 'vta.lower', (['s', '[data, weight, res]'], {'simple_mode': '(True)'}), '(s, [data, weight, res], simple_mode=True)\n', (12158, 12200), False, 'import vta\n'), ((13970, 13993), 'vta.testing.simulator.clear_stats', 'simulator.clear_stats', ([], {}), '()\n', (13991, 13993), False, 'from vta.testing import simulator\n'), ((14647, 14664), 'vta.testing.simulator.stats', 'simulator.stats', ([], {}), '()\n', (14662, 14664), False, 'from vta.testing import simulator\n'), ((2649, 2667), 'tvm.rpc.LocalSession', 'rpc.LocalSession', ([], {}), '()\n', (2665, 2667), False, 'from tvm import rpc\n'), ((12950, 13010), 'numpy.random.randint', 'np.random.randint', (['(-128)', '(128)'], {'size': '(batch_size, in_channels)'}), '(-128, 128, 
size=(batch_size, in_channels))\n', (12967, 13010), True, 'import numpy as np\n'), ((13047, 13109), 'numpy.random.randint', 'np.random.randint', (['(-128)', '(128)'], {'size': '(out_channels, in_channels)'}), '(-128, 128, size=(out_channels, in_channels))\n', (13064, 13109), True, 'import numpy as np\n'), ((13870, 13892), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (13878, 13892), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
#
# Generate a "tuning" dataset, where each datapoint in the set consists of the information from two bouncing ball
# simulators. Used to train TuneNet.
import os
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories
# Torch device resolved once at import; shared by every dataset/transform below.
device = get_torch_device()
class DatasetTuneNet(Dataset):
    """Dataset of paired bouncing-ball simulations used to train TuneNet.

    Each datapoint bundles the physics parameters and trajectories of two
    simulator runs of the same scene (see ``__getitem__``).
    """

    # Per-channel normalization bounds, broadcast over 400 timesteps.
    # NOTE(review): 3 channels matches the "observation" stacking below —
    # TODO confirm against the data generator.
    dataset_max = torch.tensor([5.0, 100.0, 5.0], device=device, dtype=torch.float64).unsqueeze(1).repeat(1, 400)
    dataset_min = torch.tensor([0.0, 0.0, 0.0], device=device, dtype=torch.float64).unsqueeze(1).repeat(1, 400)

    def __init__(self, dataset_name, observation_type, transform=None):
        """
        :param dataset_name: directory containing directories 0, 1, 2, etc. (one folder per simulation)
        :param observation_type: "ground_truth" or "observation"; selects which arrays __getitem__ loads
        :param transform: Optional transform(s) to be applied on a sample
        """
        self.root_dir = get_dataset_base_path(dataset_name)
        print("this dataset exists in " + self.root_dir)
        self.loadtype = observation_type
        self.transform = transform
        # One subdirectory per simulated datapoint.
        self.length = len(get_immediate_subdirectories(self.root_dir))
        print('dataset loaded from {} with {} elements'.format(self.root_dir, self.length))

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Return ``[zeta, s, v]`` tensors for sample ``idx``.

        zeta: stacked (physics parameter, start position) pairs for both runs
        s:    stacked trajectories (2 rows for "ground_truth", 3 for "observation")
        v:    stacked vertical linear velocities of both runs

        :raises ValueError: if the observation type is not recognized.
        """
        def make_name(name):
            return osp.join(self.root_dir, str(idx), name + ".npy")

        if self.loadtype == "ground_truth":
            zeta1 = (np.load(make_name("physics1"))[0], np.load(make_name("start_position"))[0])
            zeta2 = (np.load(make_name("physics2"))[0], np.load(make_name("start_position"))[0])
            zeta = torch.tensor(np.vstack((zeta1, zeta2))).to(device)
            s1 = np.load(make_name("position1"))[:, 2]
            s2 = np.load(make_name("position2"))[:, 2]
            s = torch.tensor(np.vstack((s1, s2))).to(device)
        elif self.loadtype == "observation":
            zeta1 = (np.load(make_name("physics1"))[0], np.load(make_name("start_position"))[0])
            zeta2 = (np.load(make_name("physics2"))[0], np.load(make_name("start_position"))[0])
            zeta = torch.tensor(np.vstack((zeta1, zeta2))).to(device)
            s1 = np.load(make_name("position1"))[:, 2]
            s2 = np.load(make_name("center2"))[:, 1]
            s3 = np.load(make_name("position2"))[:, 2]
            s = torch.tensor(np.vstack((s1, s2, s3))).to(device)
        else:
            # BUGFIX: previously this branch only printed a warning and then
            # crashed with a NameError on the undefined `zeta`; fail fast.
            raise ValueError("Loadtype {} not understood.".format(self.loadtype))

        if self.transform:
            s[:, :] = self.transform(s[:, :])

        v1 = np.load(make_name("linear_velocity_list1"))[:, 2]
        v2 = np.load(make_name("linear_velocity_list2"))[:, 2]
        v = torch.tensor(np.vstack((v1, v2))).to(device)
        return [zeta, s, v]

    @classmethod
    def get_data_loader(cls, dataset_name: str, observation_type: str, dataset_action: str, batch_size: int):
        """
        Get a fully-fledged DataLoader object that can be used to iterate over a given dataset.
        :param dataset_name: The overall name of the dataset to load. The dataset exists in a dir with this name.
        :param observation_type: A string, either "ground_truth" or "observation," to help the network apply
        transformations to the input and convert it to the range [-1, 1].
        :param dataset_action: train, test, or val. The actual datapoints will be stored in a subdir with this name
        inside the overall dataset folder
        :param batch_size: Number of datapoints to load into each batch
        :return: a shuffled torch DataLoader over the requested split
        """
        transform = transforms.Compose([])
        if observation_type == "observation":
            # NOTE(review): (x - min) / max is not a standard min-max scaling
            # ((x - min) / (max - min)); kept as-is — verify intent.
            transform = transforms.Compose([
                transforms.Lambda(lambda x: (x - cls.dataset_min) / cls.dataset_max)
            ])
        dataset = DatasetTuneNet(dataset_name=dataset_name + "/" + dataset_action,
                                 transform=transform,
                                 observation_type=observation_type)
        return torch.utils.data.DataLoader(
            dataset,
            shuffle=True,
            batch_size=batch_size)
class GaussianNoise(object):
    """Additive Gaussian-noise transform.

    Args:
        stdev (float): standard deviation of the gaussian noise added to the input.
    """

    def __init__(self, stdev=0.1):
        self.stdev = stdev
        # Scalar template tensor; repeated to the input's shape on each call.
        self.noise = torch.tensor(0, dtype=torch.double).to(device)

    def __call__(self, x):
        # BUGFIX: the original called ``normal_(self.stdev)``, which passes
        # stdev as the *mean* of Tensor.normal_(mean=0, std=1) and therefore
        # sampled N(stdev, 1). Draw zero-mean noise with the configured stdev.
        sampled_noise = self.noise.repeat(*x.size()).normal_(mean=0, std=self.stdev)
        return x + sampled_noise
class DatasetTuneNetKinova(Dataset):
# dataset_max = torch.tensor([5.0, 100.0, 5.0], device=device, dtype=torch.float64).unsqueeze(1).repeat(1, 400)
# dataset_min = torch.tensor([0.0, 0.0, 0.0], device=device, dtype=torch.float64).unsqueeze(1).repeat(1, 400)
# dataset of videos, positions, and physics parameters for dropped balls.
    def __init__(self, dataset_name, transform=None):
        """
        :param dataset_name: directory containing directories 0, 1, 2, etc. (one folder per simulation)
        :param transform: Optional transform(s) to be applied on a sample
        """
        self.root_dir = get_dataset_base_path(dataset_name)
        print("this dataset exists in " + self.root_dir)
        self.transform = transform
        # One subdirectory per simulated datapoint.
        self.length = len(get_immediate_subdirectories(self.root_dir))
        print('dataset loaded from {} with {} elements'.format(self.root_dir, self.length))
    def __len__(self):
        # One datapoint per simulation subdirectory (counted in __init__).
        return self.length
def __getitem__(self, idx):
# load ground truth
def make_name(name):
return osp.join(self.root_dir, str(idx), name + ".npy")
zeta1 = np.load(make_name("mass1"))
zeta2 = np.load(make_name("mass2"))
zeta = torch.tensor(np.stack((zeta1, zeta2), axis=0)).to(device)
o1 = np.load(make_name("torques1"))
o2 = np.load(make_name("torques2"))
o = torch.tensor(np.stack((o1, o2), axis=0)).to(device)
if self.transform:
o = self.transform(o)
return [zeta, o]
@classmethod
def load_dataset_limits(cls, path):
limits_filename = os.path.join(path, "limits.npy")
# get data size
subdirs = get_immediate_subdirectories(path)
single_torque_size = np.load(os.path.join(path, subdirs[0], "torques1.npy")).shape
print(single_torque_size)
if os.path.isfile(limits_filename):
loaded = np.load(limits_filename)
print(loaded)
mean = loaded["mean"]
std = loaded["std"]
print("loaded dataset mean: {}, std: {}".format(mean, std))
mean_tensor = torch.tensor(mean).unsqueeze(0).repeat(single_torque_size[0]*2, 1).to(device)
std_tensor = torch.tensor(std).unsqueeze(0).repeat(single_torque_size[0]*2, 1).to(device)
# print("mean tensor shape")
# print(mean_tensor.shape)
return mean_tensor, std_tensor
else:
# we need to calculate the limits
torques = np.empty([len(subdirs)*2, *single_torque_size])
idx = 0
for subdir in subdirs:
for torque_file in "torques1.npy", "torques2.npy":
torques_loaded = np.load(os.path.join(path, subdir, torque_file))
torques[idx] = torques_loaded
idx += 1
mean = np.mean(torques, axis=(0, 1), keepdims=False)
std = np.std(torques, axis=(0, 1), keepdims=False)
# print("calculated dataset mean: {}, std: {}".format(mean, std))
# these gnarly lines spread the mean and std tensors so they are the shape of the loaded data structure
mean_tensor = torch.tensor(mean).unsqueeze(0).repeat(single_torque_size[0], 1).unsqueeze(0).repeat(2, 1, 1).to(device)
std_tensor = torch.tensor(std).unsqueeze(0).repeat(single_torque_size[0], 1).unsqueeze(0).repeat(2, 1, 1).to(device)
# print("mean tensor shape")
# print(mean_tensor.shape)
np.savez(limits_filename, allow_pickle=False, mean=mean, std=std)
return mean_tensor, std_tensor
    @classmethod
    def get_data_loader(cls, dataset_name: str, dataset_action: str, batch_size: int):
        """
        Get a fully-fledged DataLoader object that can be used to iterate over a given dataset.
        :param dataset_name: The overall name of the dataset to load. The dataset exists in a dir with this name.
        :param dataset_action: train, test, or val. The actual datapoints will be stored in a subdir with this name
        inside the overall dataset folder
        :param batch_size: Number of datapoints to load into each batch
        :return:
        """
        dataset_dir = get_dataset_base_path(dataset_name)
        # Normalization statistics are always taken from the *train* split,
        # regardless of which split this loader serves.
        dataset_mean, dataset_std = cls.load_dataset_limits(
            os.path.join(dataset_dir, "train"))
        transform = transforms.Compose([
            # Standardize torques to zero mean / unit variance per joint.
            transforms.Lambda(lambda x: (x - dataset_mean) / dataset_std)
        ])
        dataset = DatasetTuneNetKinova(dataset_name=dataset_name + "/" + dataset_action,
                                      transform=transform)
        return torch.utils.data.DataLoader(
            dataset,
            shuffle=True,
batch_size=batch_size) | [
"numpy.mean",
"numpy.savez",
"tune.utils.get_torch_device",
"numpy.std",
"os.path.join",
"torchvision.transforms.Lambda",
"tune.utils.get_immediate_subdirectories",
"os.path.isfile",
"torch.tensor",
"numpy.stack",
"numpy.vstack",
"torch.utils.data.DataLoader",
"tune.utils.get_dataset_base_pa... | [((460, 478), 'tune.utils.get_torch_device', 'get_torch_device', ([], {}), '()\n', (476, 478), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((1116, 1151), 'tune.utils.get_dataset_base_path', 'get_dataset_base_path', (['dataset_name'], {}), '(dataset_name)\n', (1137, 1151), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((3910, 3932), 'torchvision.transforms.Compose', 'transforms.Compose', (['[]'], {}), '([])\n', (3928, 3932), True, 'import torchvision.transforms as transforms\n'), ((4345, 4418), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)', 'batch_size': 'batch_size'}), '(dataset, shuffle=True, batch_size=batch_size)\n', (4372, 4418), False, 'import torch\n'), ((5510, 5545), 'tune.utils.get_dataset_base_path', 'get_dataset_base_path', (['dataset_name'], {}), '(dataset_name)\n', (5531, 5545), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((6498, 6530), 'os.path.join', 'os.path.join', (['path', '"""limits.npy"""'], {}), "(path, 'limits.npy')\n", (6510, 6530), False, 'import os\n'), ((6573, 6607), 'tune.utils.get_immediate_subdirectories', 'get_immediate_subdirectories', (['path'], {}), '(path)\n', (6601, 6607), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((6744, 6775), 'os.path.isfile', 'os.path.isfile', (['limits_filename'], {}), '(limits_filename)\n', (6758, 6775), False, 'import os\n'), ((9149, 9184), 'tune.utils.get_dataset_base_path', 'get_dataset_base_path', (['dataset_name'], {}), '(dataset_name)\n', (9170, 9184), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((9578, 9651), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)', 
'batch_size': 'batch_size'}), '(dataset, shuffle=True, batch_size=batch_size)\n', (9605, 9651), False, 'import torch\n'), ((1312, 1355), 'tune.utils.get_immediate_subdirectories', 'get_immediate_subdirectories', (['self.root_dir'], {}), '(self.root_dir)\n', (1340, 1355), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((5665, 5708), 'tune.utils.get_immediate_subdirectories', 'get_immediate_subdirectories', (['self.root_dir'], {}), '(self.root_dir)\n', (5693, 5708), False, 'from tune.utils import get_torch_device, get_dataset_base_path, get_immediate_subdirectories\n'), ((6798, 6822), 'numpy.load', 'np.load', (['limits_filename'], {}), '(limits_filename)\n', (6805, 6822), True, 'import numpy as np\n'), ((7754, 7799), 'numpy.mean', 'np.mean', (['torques'], {'axis': '(0, 1)', 'keepdims': '(False)'}), '(torques, axis=(0, 1), keepdims=False)\n', (7761, 7799), True, 'import numpy as np\n'), ((7818, 7862), 'numpy.std', 'np.std', (['torques'], {'axis': '(0, 1)', 'keepdims': '(False)'}), '(torques, axis=(0, 1), keepdims=False)\n', (7824, 7862), True, 'import numpy as np\n'), ((8409, 8474), 'numpy.savez', 'np.savez', (['limits_filename'], {'allow_pickle': '(False)', 'mean': 'mean', 'std': 'std'}), '(limits_filename, allow_pickle=False, mean=mean, std=std)\n', (8417, 8474), True, 'import numpy as np\n'), ((9258, 9292), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train"""'], {}), "(dataset_dir, 'train')\n", (9270, 9292), False, 'import os\n'), ((4705, 4740), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.double'}), '(0, dtype=torch.double)\n', (4717, 4740), False, 'import torch\n'), ((6645, 6691), 'os.path.join', 'os.path.join', (['path', 'subdirs[0]', '"""torques1.npy"""'], {}), "(path, subdirs[0], 'torques1.npy')\n", (6657, 6691), False, 'import os\n'), ((9347, 9408), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['(lambda x: (x - dataset_mean) / dataset_std)'], {}), '(lambda x: (x - 
dataset_mean) / dataset_std)\n', (9364, 9408), True, 'import torchvision.transforms as transforms\n'), ((530, 597), 'torch.tensor', 'torch.tensor', (['[5.0, 100.0, 5.0]'], {'device': 'device', 'dtype': 'torch.float64'}), '([5.0, 100.0, 5.0], device=device, dtype=torch.float64)\n', (542, 597), False, 'import torch\n'), ((644, 709), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {'device': 'device', 'dtype': 'torch.float64'}), '([0.0, 0.0, 0.0], device=device, dtype=torch.float64)\n', (656, 709), False, 'import torch\n'), ((2989, 3008), 'numpy.vstack', 'np.vstack', (['(v1, v2)'], {}), '((v1, v2))\n', (2998, 3008), True, 'import numpy as np\n'), ((4040, 4108), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['(lambda x: (x - cls.dataset_min) / cls.dataset_max)'], {}), '(lambda x: (x - cls.dataset_min) / cls.dataset_max)\n', (4057, 4108), True, 'import torchvision.transforms as transforms\n'), ((6128, 6160), 'numpy.stack', 'np.stack', (['(zeta1, zeta2)'], {'axis': '(0)'}), '((zeta1, zeta2), axis=0)\n', (6136, 6160), True, 'import numpy as np\n'), ((6287, 6313), 'numpy.stack', 'np.stack', (['(o1, o2)'], {'axis': '(0)'}), '((o1, o2), axis=0)\n', (6295, 6313), True, 'import numpy as np\n'), ((1929, 1954), 'numpy.vstack', 'np.vstack', (['(zeta1, zeta2)'], {}), '((zeta1, zeta2))\n', (1938, 1954), True, 'import numpy as np\n'), ((2107, 2126), 'numpy.vstack', 'np.vstack', (['(s1, s2)'], {}), '((s1, s2))\n', (2116, 2126), True, 'import numpy as np\n'), ((7614, 7653), 'os.path.join', 'os.path.join', (['path', 'subdir', 'torque_file'], {}), '(path, subdir, torque_file)\n', (7626, 7653), False, 'import os\n'), ((2411, 2436), 'numpy.vstack', 'np.vstack', (['(zeta1, zeta2)'], {}), '((zeta1, zeta2))\n', (2420, 2436), True, 'import numpy as np\n'), ((2642, 2665), 'numpy.vstack', 'np.vstack', (['(s1, s2, s3)'], {}), '((s1, s2, s3))\n', (2651, 2665), True, 'import numpy as np\n'), ((7013, 7031), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (7025, 7031), 
False, 'import torch\n'), ((7116, 7133), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (7128, 7133), False, 'import torch\n'), ((8083, 8101), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (8095, 8101), False, 'import torch\n'), ((8213, 8230), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (8225, 8230), False, 'import torch\n')] |
import numpy as np
from numpy import linalg
from gym import utils
import os
from gym.envs.mujoco import mujoco_env
import math
#from gym_reinmav.envs.mujoco import MujocoQuadEnv
# For testing whether a number is close to zero
_FLOAT_EPS = np.finfo(np.float64).eps  # machine epsilon for float64
_EPS4 = _FLOAT_EPS * 4.0  # slightly looser tolerance (4 ulp)
class BallBouncingQuadEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Mujoco environment of a quadrotor bouncing a ball.

    Observation: concatenation of ``qpos`` (position + orientation
    quaternion) and ``qvel`` (linear + angular velocity).
    Action: ``[thrust, dx, dy, dz]``, clipped to actuator limits.
    """

    def __init__(self):
        self.avg_rwd = -3.0  # average episode reward, obtained from eprewmean
        self.gamma = 0.99    # discount factor, ppo2 default setting value
        self.log_cnt = 0     # step counter used to throttle debug printing
        mujoco_env.MujocoEnv.__init__(self, 'ball_bouncing_quad.xml', 5)
        utils.EzPickle.__init__(self)

    def step(self, action):
        """Apply ``action``, advance the simulation and compute the reward.

        Parameters
        ----------
        action : array-like of 4 floats
            ``[thrust, dx, dy, dz]``; clipped to the actuator limits below.

        Returns
        -------
        tuple
            ``(observation, reward, done, info)`` in the gym convention.
        """
        # Clip the action to the physical actuator limits.
        act_min = [3.5, -0.5, -0.7, -0.03]
        act_max = [30, 0.5, 0.7, 0.03]
        action = np.clip(action, a_min=act_min, a_max=act_max)
        self.do_simulation(action, self.frame_skip)
        ob = self._get_obs()
        pos = ob[0:3]
        quat = ob[3:7]
        lin_vel = ob[7:10]
        ang_vel = ob[10:13]
        # Reward: stay alive near the origin, with small velocities and
        # small control effort.
        reward_ctrl = -0.1e-3 * np.sum(np.square(action))
        reward_position = -linalg.norm(pos) * 1e-2
        reward_linear_velocity = -linalg.norm(lin_vel) * 0.1e-3
        reward_angular_velocity = -linalg.norm(ang_vel) * 0.1e-3
        reward_alive = 1e-2
        reward = (reward_ctrl + reward_position + reward_linear_velocity
                  + reward_angular_velocity + reward_alive)
        # Episode terminates when the quad leaves a 50 m box around the origin.
        done = abs(pos[2]) > 50 \
            or abs(pos[0]) > 50.0 \
            or abs(pos[1]) > 50.0
        info = {
            'rwp': reward_position,
            'rwlv': reward_linear_velocity,
            'rwav': reward_angular_velocity,
            'rwctrl': reward_ctrl,
            'obx': pos[0],
            'oby': pos[1],
            'obz': pos[2],
            'obvx': lin_vel[0],
            'obvy': lin_vel[1],
            'obvz': lin_vel[2],
        }
        if done:
            # Terminal penalty: roughly twice the discounted return of
            # collecting the average reward forever.
            reward = self.avg_rwd / (1 - self.gamma) * 2
        # Throttled debug logging: print the state once every 1e4 steps.
        if self.log_cnt == 1e4:
            print("x={},y={},z={}\n".format(pos[0], pos[1], pos[2]))
            print("thrust={}, dx={}, dy={}, dz={}".format(action[0], action[1], action[2], action[3]))
            self.log_cnt = 0
        else:
            self.log_cnt = self.log_cnt + 1
        return ob, reward, done, info

    def _get_obs(self):
        """Return the observation: concatenated (unscaled) qpos and qvel."""
        pos = self.sim.data.qpos * 1e-0
        vel = self.sim.data.qvel * 1e-0
        return np.concatenate([pos.flat, vel.flat])

    def reset_model(self):
        """Reset to the initial state perturbed by small uniform noise."""
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.1, high=0.1)
        qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.05, high=0.05)
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        """Track body 0 from a distance proportional to the model extent."""
        v = self.viewer
        v.cam.trackbodyid = 0
        v.cam.distance = self.model.stat.extent * 4

    def get_mass(self):
        """Return the body masses as a column vector of shape (nbody, 1)."""
        mass = np.expand_dims(self.model.body_mass, axis=1)
        return mass

    # adapted from rotations.py
    def quat2mat(self, quat):
        """Convert quaternion(s) ``[w, x, y, z]`` to rotation matrices.

        Falls back to the identity matrix for (near-)zero quaternions.
        """
        quat = np.asarray(quat, dtype=np.float64)
        assert quat.shape[-1] == 4, "Invalid shape quat {}".format(quat)
        w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
        Nq = np.sum(quat * quat, axis=-1)
        s = 2.0 / Nq
        X, Y, Z = x * s, y * s, z * s
        wX, wY, wZ = w * X, w * Y, w * Z
        xX, xY, xZ = x * X, x * Y, x * Z
        yY, yZ, zZ = y * Y, y * Z, z * Z
        mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)
        mat[..., 0, 0] = 1.0 - (yY + zZ)
        mat[..., 0, 1] = xY - wZ
        mat[..., 0, 2] = xZ + wY
        mat[..., 1, 0] = xY + wZ
        mat[..., 1, 1] = 1.0 - (xX + zZ)
        mat[..., 1, 2] = yZ - wX
        mat[..., 2, 0] = xZ - wY
        mat[..., 2, 1] = yZ + wX
        mat[..., 2, 2] = 1.0 - (xX + yY)
        return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))

    def RotToRPY(self, R):
        """Extract Euler angles ``(phi, theta, psi)`` from a rotation matrix.

        ``R`` may carry a trailing singleton dimension; it is reshaped
        to (3, 3) first.
        """
        R = R.reshape(3, 3)  # to remove the last dimension i.e., 3,3,1
        phi = math.asin(R[1, 2])
        cphi = math.cos(phi)  # hoisted: reused four times below
        psi = math.atan2(-R[1, 0] / cphi, R[1, 1] / cphi)
        theta = math.atan2(-R[0, 2] / cphi, R[2, 2] / cphi)
        return phi, theta, psi
# def __init__(self, xml_name="quadrotor_quat.xml"):
# super(MujocoQuadQuaternionEnv, self).__init__(xml_name=xml_name)
# def step(self, action):
# goal_pos = np.array([0.0, 0.0, 1.0])
# alive_bonus = 1e1
# xposbefore = self.sim.data.qpos[0]
# self.do_simulation(action, self.frame_skip)
# xposafter = self.sim.data.qpos[0]
# ob = self._get_obs()
# pos = ob[0:3]
# quat = ob[3:7]
# lin_vel = ob[7:10]
# ang_vel= ob[10:13]
# lin_acc = ob[13:16]
# ang_acc = ob[16:19]
# #print("step a=",a)
# #reward_position = -linalg.norm(pos-goal_pos) * 0.2e-1
# reward_position = -linalg.norm(pos) * 0.2e-1
# reward_linear_velocity = -linalg.norm(lin_vel) * 1e-3
# reward_angular_velocity = -linalg.norm(ang_vel) * 1e-1
# reward_action = -linalg.norm(action)+np.sum(action)*1e-1
# reward_alive = alive_bonus
# # reward = reward_position \
# # + reward_linear_velocity \
# # + reward_angular_velocity \
# # + reward_action \
# # + reward_alive
# reward_ctrl = - 0.1 * np.square(action).sum()
# reward_run = (xposafter - xposbefore)/self.dt
# #print("r_ctrl=",reward_ctrl)
# #print("r_run=",reward_run)
# reward = reward_ctrl + reward_run
# # notdone = np.isfinite(ob).all() \
# # and pos[2] > 0.3 \
# # and abs(pos[0]) < 2.0 \
# # and abs(pos[1]) < 2.0
# notdone = np.isfinite(ob).all() \
# and abs(pos[0]) < 2.0 \
# and abs(pos[1]) < 2.0
# # info = {
# # 'rp': reward_position,
# # 'rlv': reward_linear_velocity,
# # 'rav': reward_angular_velocity,
# # 'ra': reward_action,
# # 'rlive': reward_alive,
# # }
# # info = {
# # 'rp': reward_position,
# # 'rlv': reward_linear_velocity,
# # 'rav': reward_ctrl,
# # 'ra': reward_action,
# # 'rlive': reward_run,
# # }
# info=dict(reward_run=reward_run, reward_ctrl=reward_ctrl)
# #if done=True indicates the episode has terminated and it's time to reset the environment. (For example, perhaps the pole tipped too far, or you lost your last life.) https://gym.openai.com/docs/
# #done = not notdone
# done = False
# return ob, reward, done, info
# def reset_model(self):
# #If reset, then we add some variations to the initial state that will be exploited for the next ep. The low and high bounds empirically set.
# qpos=self.init_qpos
# qvel=self.init_qvel
# qpos[0:3] +=self.np_random.uniform(size=3, low=-0.1, high=0.1)
# qvel[0:3] +=self.np_random.uniform(size=3, low=-0.01, high=0.01)
# self.set_state(qpos, qvel)
# return self._get_obs()
# def clip_action(self, action):
# """
# clip action to [0, inf]
# :param action:
# :return: clipped action
# """
# act_min=[0,-0.5,-0.5,-0.5]
# act_max=[7,0.5,0.5,0.5]
# #action = np.clip(action, a_min=-np.inf, a_max=np.inf)
# action = np.clip(action, a_min=act_min, a_max=act_max)
# return action
| [
"numpy.clip",
"numpy.eye",
"math.asin",
"numpy.asarray",
"numpy.linalg.norm",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.square",
"math.cos",
"numpy.sum",
"gym.utils.EzPickle.__init__",
"numpy.empty",
"numpy.concatenate",
"numpy.expand_dims",
"numpy.finfo"
] | [((241, 261), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (249, 261), True, 'import numpy as np\n'), ((607, 671), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', '"""ball_bouncing_quad.xml"""', '(5)'], {}), "(self, 'ball_bouncing_quad.xml', 5)\n", (636, 671), False, 'from gym.envs.mujoco import mujoco_env\n'), ((680, 709), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (703, 709), False, 'from gym import utils\n'), ((1084, 1129), 'numpy.clip', 'np.clip', (['action'], {'a_min': 'act_min', 'a_max': 'act_max'}), '(action, a_min=act_min, a_max=act_max)\n', (1091, 1129), True, 'import numpy as np\n'), ((3166, 3202), 'numpy.concatenate', 'np.concatenate', (['[pos.flat, vel.flat]'], {}), '([pos.flat, vel.flat])\n', (3180, 3202), True, 'import numpy as np\n'), ((4512, 4556), 'numpy.expand_dims', 'np.expand_dims', (['self.model.body_mass'], {'axis': '(1)'}), '(self.model.body_mass, axis=1)\n', (4526, 4556), True, 'import numpy as np\n'), ((4735, 4769), 'numpy.asarray', 'np.asarray', (['quat'], {'dtype': 'np.float64'}), '(quat, dtype=np.float64)\n', (4745, 4769), True, 'import numpy as np\n'), ((4933, 4961), 'numpy.sum', 'np.sum', (['(quat * quat)'], {'axis': '(-1)'}), '(quat * quat, axis=-1)\n', (4939, 4961), True, 'import numpy as np\n'), ((5159, 5211), 'numpy.empty', 'np.empty', (['(quat.shape[:-1] + (3, 3))'], {'dtype': 'np.float64'}), '(quat.shape[:-1] + (3, 3), dtype=np.float64)\n', (5167, 5211), True, 'import numpy as np\n'), ((5729, 5747), 'math.asin', 'math.asin', (['R[1, 2]'], {}), '(R[1, 2])\n', (5738, 5747), False, 'import math\n'), ((5610, 5619), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5616, 5619), True, 'import numpy as np\n'), ((1554, 1571), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1563, 1571), True, 'import numpy as np\n'), ((1600, 1616), 'numpy.linalg.norm', 'linalg.norm', (['pos'], {}), '(pos)\n', (1611, 1616), False, 
'from numpy import linalg\n'), ((1658, 1678), 'numpy.linalg.norm', 'linalg.norm', (['lin_vel'], {}), '(lin_vel)\n', (1669, 1678), False, 'from numpy import linalg\n'), ((1723, 1743), 'numpy.linalg.norm', 'linalg.norm', (['ang_vel'], {}), '(ang_vel)\n', (1734, 1743), False, 'from numpy import linalg\n'), ((5780, 5793), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (5788, 5793), False, 'import math\n'), ((5801, 5814), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (5809, 5814), False, 'import math\n'), ((5851, 5864), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (5859, 5864), False, 'import math\n'), ((5872, 5885), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (5880, 5885), False, 'import math\n')] |
'''
Demonstration of the OpenCV shape-drawing API: lines, rectangles,
circles, ellipses, polygons and text, first on a blank canvas and
then on a loaded photograph.
'''
import cv2
import numpy as np
# First we try one sample image -
# The grey level or grey value indicates the brightness of a pixel. The minimum grey level is 0.
# The maximum grey level depends on the digitisation depth of the image. For an 8-bit-deep image it is 255.
# '0' gray value indicates - black image
black_img = np.zeros((512,512,3), np.uint8)
'''
Drawing a line : cv2.line(src, line_point1, line_point2, color, thickness)
src : It is the image on which line is to be drawn.
rect_point1 :- Start coordinate, here (0, 0) represents the top left corner of line
rect_point2 :- Ending coordinate, here (512, 512) represents the bottom right corner of line
color: It is the color of line to be drawn. For BGR, we pass a tuple. eg: (255, 0, 0) for blue color.
thickness: It is the thickness of the line in px.
Return Value: It returns an image.
'''
cv2.line(black_img, (0,0),(black_img.shape[0],black_img.shape[1]),(0,255,0),2)
cv2.imshow('Drawing_line', black_img)
cv2.waitKey(0)
'''
Drawing a rectangle : cv2.rectangle(src, rect_point1, rect_point2, color, thickness)
src : It is the image on which rectangle is to be drawn.
rect_point1 :- Start coordinate, here (350, 100) represents the top left corner of rectangle
rect_point2 :- Ending coordinate, here (450, 200) represents the bottom right corner of rectangle
color: It is the color of border line of rectangle to be drawn. For BGR, we pass a tuple. eg: (255, 0, 0) for blue color.
thickness: It is the thickness of the rectangle border line in px. Thickness of -1 px will fill the rectangle shape by the specified color.
Return Value: It returns an image.
'''
cv2.rectangle(black_img, (350,100),(450,200),(0,255,0),2)
cv2.imshow('Drawing_rect', black_img)
cv2.waitKey(0)
# Passing cv2.FILLED (-1) as the thickness fills the rectangle.
cv2.rectangle(black_img, (350,100),(450,200),(255,0,0),cv2.FILLED)
cv2.imshow('Drawing_rect_filled', black_img)
cv2.waitKey(0)
'''
Drawing a circle : cv2.circle(src, center_point, radius, color, thickness)
'''
# NOTE(review): here cv2.FILLED is passed as the 6th positional argument,
# which is lineType, not thickness (thickness is 2) — so this does NOT draw
# a filled circle; to fill, pass cv2.FILLED as thickness. Confirm intent.
cv2.circle(black_img, (300,400),50,(0,255,255),2, cv2.FILLED)
cv2.imshow('Drawing_circle', black_img)
cv2.waitKey(0)
'''
Drawing a ellipse : cv2.circle(src, center_coordinates, axesLength, startAngle, endAngle, color, thickness)
'''
# Centre of the ellipse: (X coordinate value, Y coordinate value).
center_coordinates = (120, 400)
# (major axis length, minor axis length) = (a,b)
axesLength = (100, 50)
# Ellipse rotation angle in degrees.
angle = 0
# Starting angle of the elliptic arc in degrees.
startAngle = 0
# Ending angle of the elliptic arc in degrees (0..360 = full ellipse).
endAngle = 360
# Red color in BGR
color = (0, 0, 255)
# Line thickness of 5 px - thickness of the shape border line in px.
thickness = 5
# Using cv2.ellipse() method
# Draw a ellipse with red line borders of thickness of 5 px
cv2.ellipse(black_img, center_coordinates, axesLength,
           angle, startAngle, endAngle, color, thickness)
cv2.imshow('Drawing_ellipse', black_img)
cv2.waitKey(0)
'''
Drawing a polygon : cv2.polylines(src, array of coordinates, True (if it is a closed line), Stroke color, Stroke thickness)
'''
pts = np.array([[60,15],[80,60],[120,60],[100,15]], np.int32)
# polylines expects points shaped (N, 1, 2).
pts = pts.reshape((-1,1,2))
cv2.polylines(black_img,[pts],True,(255,255,255), 2)
cv2.imshow('Drawing_window', black_img)
cv2.waitKey(0)
## Now we will look for actually images
# NOTE(review): imread returns None when the file is missing; the imshow
# below would then fail — verify the relative path exists.
img = cv2.imread('../Images and videos/image8.jpg')
cv2.imshow('img', img)
cv2.waitKey(0)
## Using lines drawing a simple 3-sided boundary
## cv2.line(src, line_point1, line_point1, color, thickness)
cv2.line(img, (0,0), (265,0),(255,0,0), 5 )
cv2.line(img, (265,0), (265, 265), (255,0,0), 5)
cv2.line(img, (0,185), (265,185), (255,0,0), 5)
## Displaying the modified image
cv2.imshow('line',img)
cv2.waitKey(0)
## How to draw rectangle around an image
cv2.rectangle(img, (0,0), (265,185), (0,255,0), 3)
cv2.imshow('rectangle', img)
cv2.waitKey(0)
## Putting some text in image
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'House', (130,160) ,font, 1, (255,0,0), 2, cv2.LINE_AA )
cv2.imshow('text', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.polylines",
"cv2.line",
"cv2.imshow",
"cv2.putText",
"cv2.ellipse",
"numpy.zeros",
"cv2.circle",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.imread"
] | [((421, 454), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (429, 454), True, 'import numpy as np\n'), ((957, 1047), 'cv2.line', 'cv2.line', (['black_img', '(0, 0)', '(black_img.shape[0], black_img.shape[1])', '(0, 255, 0)', '(2)'], {}), '(black_img, (0, 0), (black_img.shape[0], black_img.shape[1]), (0, \n 255, 0), 2)\n', (965, 1047), False, 'import cv2\n'), ((1036, 1073), 'cv2.imshow', 'cv2.imshow', (['"""Drawing_line"""', 'black_img'], {}), "('Drawing_line', black_img)\n", (1046, 1073), False, 'import cv2\n'), ((1074, 1088), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1085, 1088), False, 'import cv2\n'), ((1732, 1796), 'cv2.rectangle', 'cv2.rectangle', (['black_img', '(350, 100)', '(450, 200)', '(0, 255, 0)', '(2)'], {}), '(black_img, (350, 100), (450, 200), (0, 255, 0), 2)\n', (1745, 1796), False, 'import cv2\n'), ((1790, 1827), 'cv2.imshow', 'cv2.imshow', (['"""Drawing_rect"""', 'black_img'], {}), "('Drawing_rect', black_img)\n", (1800, 1827), False, 'import cv2\n'), ((1828, 1842), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1839, 1842), False, 'import cv2\n'), ((1844, 1917), 'cv2.rectangle', 'cv2.rectangle', (['black_img', '(350, 100)', '(450, 200)', '(255, 0, 0)', 'cv2.FILLED'], {}), '(black_img, (350, 100), (450, 200), (255, 0, 0), cv2.FILLED)\n', (1857, 1917), False, 'import cv2\n'), ((1911, 1955), 'cv2.imshow', 'cv2.imshow', (['"""Drawing_rect_filled"""', 'black_img'], {}), "('Drawing_rect_filled', black_img)\n", (1921, 1955), False, 'import cv2\n'), ((1956, 1970), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1967, 1970), False, 'import cv2\n'), ((2057, 2124), 'cv2.circle', 'cv2.circle', (['black_img', '(300, 400)', '(50)', '(0, 255, 255)', '(2)', 'cv2.FILLED'], {}), '(black_img, (300, 400), 50, (0, 255, 255), 2, cv2.FILLED)\n', (2067, 2124), False, 'import cv2\n'), ((2119, 2158), 'cv2.imshow', 'cv2.imshow', (['"""Drawing_circle"""', 'black_img'], {}), "('Drawing_circle', 
black_img)\n", (2129, 2158), False, 'import cv2\n'), ((2159, 2173), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2170, 2173), False, 'import cv2\n'), ((2846, 2951), 'cv2.ellipse', 'cv2.ellipse', (['black_img', 'center_coordinates', 'axesLength', 'angle', 'startAngle', 'endAngle', 'color', 'thickness'], {}), '(black_img, center_coordinates, axesLength, angle, startAngle,\n endAngle, color, thickness)\n', (2857, 2951), False, 'import cv2\n'), ((2959, 2999), 'cv2.imshow', 'cv2.imshow', (['"""Drawing_ellipse"""', 'black_img'], {}), "('Drawing_ellipse', black_img)\n", (2969, 2999), False, 'import cv2\n'), ((3000, 3014), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3011, 3014), False, 'import cv2\n'), ((3157, 3219), 'numpy.array', 'np.array', (['[[60, 15], [80, 60], [120, 60], [100, 15]]', 'np.int32'], {}), '([[60, 15], [80, 60], [120, 60], [100, 15]], np.int32)\n', (3165, 3219), True, 'import numpy as np\n'), ((3241, 3298), 'cv2.polylines', 'cv2.polylines', (['black_img', '[pts]', '(True)', '(255, 255, 255)', '(2)'], {}), '(black_img, [pts], True, (255, 255, 255), 2)\n', (3254, 3298), False, 'import cv2\n'), ((3294, 3333), 'cv2.imshow', 'cv2.imshow', (['"""Drawing_window"""', 'black_img'], {}), "('Drawing_window', black_img)\n", (3304, 3333), False, 'import cv2\n'), ((3334, 3348), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3345, 3348), False, 'import cv2\n'), ((3397, 3442), 'cv2.imread', 'cv2.imread', (['"""../Images and videos/image8.jpg"""'], {}), "('../Images and videos/image8.jpg')\n", (3407, 3442), False, 'import cv2\n'), ((3443, 3465), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (3453, 3465), False, 'import cv2\n'), ((3466, 3480), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3477, 3480), False, 'import cv2\n'), ((3594, 3641), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(265, 0)', '(255, 0, 0)', '(5)'], {}), '(img, (0, 0), (265, 0), (255, 0, 0), 5)\n', (3602, 3641), False, 'import cv2\n'), 
((3638, 3689), 'cv2.line', 'cv2.line', (['img', '(265, 0)', '(265, 265)', '(255, 0, 0)', '(5)'], {}), '(img, (265, 0), (265, 265), (255, 0, 0), 5)\n', (3646, 3689), False, 'import cv2\n'), ((3687, 3738), 'cv2.line', 'cv2.line', (['img', '(0, 185)', '(265, 185)', '(255, 0, 0)', '(5)'], {}), '(img, (0, 185), (265, 185), (255, 0, 0), 5)\n', (3695, 3738), False, 'import cv2\n'), ((3769, 3792), 'cv2.imshow', 'cv2.imshow', (['"""line"""', 'img'], {}), "('line', img)\n", (3779, 3792), False, 'import cv2\n'), ((3792, 3806), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3803, 3806), False, 'import cv2\n'), ((3849, 3903), 'cv2.rectangle', 'cv2.rectangle', (['img', '(0, 0)', '(265, 185)', '(0, 255, 0)', '(3)'], {}), '(img, (0, 0), (265, 185), (0, 255, 0), 3)\n', (3862, 3903), False, 'import cv2\n'), ((3900, 3928), 'cv2.imshow', 'cv2.imshow', (['"""rectangle"""', 'img'], {}), "('rectangle', img)\n", (3910, 3928), False, 'import cv2\n'), ((3929, 3943), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3940, 3943), False, 'import cv2\n'), ((4007, 4082), 'cv2.putText', 'cv2.putText', (['img', '"""House"""', '(130, 160)', 'font', '(1)', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), "(img, 'House', (130, 160), font, 1, (255, 0, 0), 2, cv2.LINE_AA)\n", (4018, 4082), False, 'import cv2\n'), ((4081, 4104), 'cv2.imshow', 'cv2.imshow', (['"""text"""', 'img'], {}), "('text', img)\n", (4091, 4104), False, 'import cv2\n'), ((4105, 4119), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4116, 4119), False, 'import cv2\n'), ((4121, 4144), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4142, 4144), False, 'import cv2\n')] |
import abc
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray
from matplotlib import cm
class Benchmark(metaclass=abc.ABCMeta):
    """Base class for optimization benchmark functions.

    Stores the per-dimension search-space bounds and can plot a 3-D
    surface of the function over its first two dimensions.
    """

    def __init__(self, lower, upper, dimension):
        """Store bounds, broadcasting scalars to every dimension.

        Parameters
        ----------
        lower, upper : float | int | ndarray | list | tuple
            Scalar bounds are broadcast to ``dimension``; sequence bounds
            must have exactly ``dimension`` entries.
        dimension : int
            Number of problem dimensions.

        Raises
        ------
        ValueError
            If the bound type is unsupported or its length mismatches
            ``dimension``.
        """
        self.dimension = dimension
        if isinstance(lower, (float, int)):
            self.lower = full(self.dimension, lower)
            self.upper = full(self.dimension, upper)
        elif isinstance(lower, (ndarray, list, tuple)) and len(lower) == dimension:
            self.lower = array(lower)
            self.upper = array(upper)
        else:
            raise ValueError("{bench}: Type mismatch or Length of bound mismatch with dimension".format(bench=self.__class__.__name__))

    def get_optimum(self):
        """Return ``([optimum point], optimum value)``; default is the origin with value 0."""
        return array([zeros(self.dimension)]), 0.0

    @staticmethod
    def eval(**kwargs):
        """Evaluate the benchmark; subclasses override. Default: +inf."""
        return inf

    def __2dfun(self, x, y, f):
        # Adapter so np.vectorize can call f over meshgrid coordinates.
        return f((x, y))

    def plot(self, scale=None, save_path=None):
        """Render a 3-D surface of the function's first two dimensions.

        Parameters
        ----------
        scale : float, optional
            Grid step; defaults to 1/100 of the first upper bound.
        save_path : str, optional
            Directory to save ``<ClassName>.png`` into; when omitted the
            plot is shown interactively.
        """
        if not scale:
            scale = abs(self.upper[0] / 100)
        fig = plt.figure()
        # add_subplot(projection=...) replaces fig.gca(projection='3d'),
        # which was removed in matplotlib >= 3.6.
        ax = fig.add_subplot(projection='3d')
        func = self.eval
        X_range, Y_range = arange(self.lower[0], self.upper[0], scale), arange(self.lower[1], self.upper[1], scale)
        X, Y = meshgrid(X_range, Y_range)
        Z = vectorize(self.__2dfun)(X, Y, func)
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.6, cmap=cm.rainbow)
        # cset = ax.contourf(X, Y, Z, zdir='z', offset=0, cmap=cm.coolwarm)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        if save_path:
            plt.savefig(save_path+'/{benchmark}.png'.format(benchmark=self.__class__.__name__), dpi=100)
            plt.clf()
            plt.close()
        else:
            plt.show()
| [
"numpy.full",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.meshgrid",
"numpy.vectorize",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1128, 1140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1138, 1140), True, 'from matplotlib import pyplot as plt\n'), ((1335, 1361), 'numpy.meshgrid', 'meshgrid', (['X_range', 'Y_range'], {}), '(X_range, Y_range)\n', (1343, 1361), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((390, 417), 'numpy.full', 'full', (['self.dimension', 'lower'], {}), '(self.dimension, lower)\n', (394, 417), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((443, 470), 'numpy.full', 'full', (['self.dimension', 'upper'], {}), '(self.dimension, upper)\n', (447, 470), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((1231, 1274), 'numpy.arange', 'arange', (['self.lower[0]', 'self.upper[0]', 'scale'], {}), '(self.lower[0], self.upper[0], scale)\n', (1237, 1274), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((1276, 1319), 'numpy.arange', 'arange', (['self.lower[1]', 'self.upper[1]', 'scale'], {}), '(self.lower[1], self.upper[1], scale)\n', (1282, 1319), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((1374, 1397), 'numpy.vectorize', 'vectorize', (['self.__2dfun'], {}), '(self.__2dfun)\n', (1383, 1397), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((1789, 1798), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1796, 1798), True, 'from matplotlib import pyplot as plt\n'), ((1811, 1822), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1820, 1822), True, 'from matplotlib import pyplot as plt\n'), ((1849, 1859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1857, 1859), True, 'from matplotlib import pyplot as plt\n'), ((580, 592), 'numpy.array', 'array', (['lower'], {}), '(lower)\n', (585, 592), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, 
ndarray\n'), ((618, 630), 'numpy.array', 'array', (['upper'], {}), '(upper)\n', (623, 630), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n'), ((831, 852), 'numpy.zeros', 'zeros', (['self.dimension'], {}), '(self.dimension)\n', (836, 852), False, 'from numpy import inf, arange, meshgrid, vectorize, full, zeros, array, ndarray\n')] |
from __future__ import absolute_import, print_function, division
import numpy
from .type import TypedListType
import theano
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor.type_other import SliceType
from theano import tensor as T
from theano.compile.debugmode import _lessbroken_deepcopy
class _typed_list_py_operators:
    """Mixin exposing the typed-list ops (getitem, append, extend, ...)
    as Python methods/operators on typed-list variables and constants.

    Each method simply delegates to the corresponding module-level Op
    instance defined in this file.
    """

    def __getitem__(self, index):
        # Supports both scalar and slice indices (see GetItem.make_node).
        return getitem(self, index)

    def __len__(self):
        return length(self)

    def append(self, toAppend):
        return append(self, toAppend)

    def extend(self, toAppend):
        return extend(self, toAppend)

    def insert(self, index, toInsert):
        return insert(self, index, toInsert)

    def remove(self, toRemove):
        # Only the first matching element is removed (see Remove.perform).
        return remove(self, toRemove)

    def reverse(self):
        return reverse(self)

    def count(self, elem):
        return count(self, elem)

    # name "index" is already used by an attribute
    def ind(self, elem):
        return index_(self, elem)

    # Element tensor type / dtype; a typed list adds one (list) dimension
    # on top of its element type's ndim.
    ttype = property(lambda self: self.type.ttype)
    dtype = property(lambda self: self.type.ttype.dtype)
    ndim = property(lambda self: self.type.ttype.ndim + 1)
class TypedListVariable(_typed_list_py_operators, Variable):
    """
    Subclass to add the typed list operators to the basic `Variable` class.
    """

# Register as the Variable class associated with TypedListType.
TypedListType.Variable = TypedListVariable
class TypedListConstant(_typed_list_py_operators, Constant):
    """
    Subclass to add the typed list operators to the basic `Constant` class.
    """

# Register as the Constant class associated with TypedListType.
TypedListType.Constant = TypedListConstant
class GetItem(Op):
    # See doc in instance of this Op or function after this class definition.

    # The output aliases (part of) the input list; no copy is made.
    view_map = {0: [0]}
    __props__ = ()

    def make_node(self, x, index):
        # `index` may be a Python int/slice or a symbolic int64 scalar /
        # SliceType variable. A slice yields a typed list (x.type); a
        # scalar yields a single element (x.ttype).
        assert isinstance(x.type, TypedListType)
        if not isinstance(index, Variable):
            if isinstance(index, slice):
                index = Constant(SliceType(), index)
                return Apply(self, [x, index], [x.type()])
            else:
                index = T.constant(index, ndim=0, dtype='int64')
                return Apply(self, [x, index], [x.ttype()])
        if isinstance(index.type, SliceType):
            return Apply(self, [x, index], [x.type()])
        elif isinstance(index, T.TensorVariable) and index.ndim == 0:
            assert index.dtype == 'int64'
            return Apply(self, [x, index], [x.ttype()])
        else:
            raise TypeError('Expected scalar or slice as index.')

    def perform(self, node, inputs, outputs):
        # Python fallback: plain list indexing (handles int and slice).
        (x, index) = inputs
        (out,) = outputs
        if not isinstance(index, slice):
            index = int(index)
        out[0] = x[index]

    def __str__(self):
        return self.__class__.__name__

    def c_code(self, node, name, inp, out, sub):
        # NOTE(review): this C snippet dereferences the index as a scalar
        # npy_int64, so it only covers integer indexing; slice indexing
        # presumably falls back to the Python `perform` — confirm.
        x_name, index = inp[0], inp[1]
        output_name = out[0]
        fail = sub['fail']
        return """
        %(output_name)s = (typeof %(output_name)s) PyList_GetItem( (PyObject*) %(x_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)));
        if(%(output_name)s == NULL){
            %(fail)s
        }
        Py_INCREF(%(output_name)s);
        """ % locals()

    def c_code_cache_version(self):
        return (1,)

getitem = GetItem()
"""
Get specified slice of a typed list.
Parameters
----------
x
    Typed list.
index
    The index of the value to return from `x`.
"""
class Append(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if self.inplace:
            # Output is the input list mutated in place.
            self.destroy_map = {0: [0]}
            # TODO: make destroy_handler support having views and
            # destroyed version of multiple inputs.
            # self.view_map = {0: [1]}
        else:
            # TODO: make destroy_handler support multiple view
            # self.view_map = {0: [0, 1]}
            self.view_map = {0: [0]}

    def make_node(self, x, toAppend):
        # The appended element must match the list's element type.
        assert isinstance(x.type, TypedListType)
        assert x.ttype == toAppend.type, (x.ttype, toAppend.type)
        return Apply(self, [x, toAppend], [x.type()])

    def perform(self, node, inputs, outputs):
        (x, toAppend) = inputs
        (out,) = outputs
        if not self.inplace:
            out[0] = list(x)
        else:
            out[0] = x
        # need to copy toAppend due to destroy_handler limitation
        toAppend = _lessbroken_deepcopy(toAppend)
        out[0].append(toAppend)

    def __str__(self):
        return self.__class__.__name__

    # DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
    # (The underscores keep theano from picking this up as c_code.)
    def _c_code_(self, node, name, inp, out, sub):
        x_name, toAppend = inp[0], inp[1]
        output_name = out[0]
        fail = sub['fail']
        if not self.inplace:
            init = """
            %(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
            """ % locals()
        else:
            init = """
            %(output_name)s =  %(x_name)s;
            """ % locals()
        return init + """
        if(%(output_name)s==NULL){
                %(fail)s
        };
        if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) %(toAppend)s)){
            %(fail)s
        };
        Py_INCREF(%(output_name)s);
        """ % locals()

    def c_code_cache_version(self):
        return (1,)

append = Append()
"""
Append an element at the end of another list.
Parameters
----------
x
    The base typed list.
y
    The element to append to `x`.
"""
class Extend(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if self.inplace:
            # Output is the input list mutated in place.
            self.destroy_map = {0: [0]}
            # TODO: make destroy_handler support having views and
            # destroyed version of multiple inputs.
            # self.view_map = {0: [1]}
        else:
            # TODO: make destroy_handler support multiple view
            # self.view_map = {0: [0, 1]}
            self.view_map = {0: [0]}

    def make_node(self, x, toAppend):
        # Unlike Append, the second argument is a whole typed list of the
        # same list type, not a single element.
        assert isinstance(x.type, TypedListType)
        assert x.type == toAppend.type
        return Apply(self, [x, toAppend], [x.type()])

    def perform(self, node, inputs, outputs):
        (x, toAppend) = inputs
        (out,) = outputs
        if not self.inplace:
            out[0] = list(x)
        else:
            out[0] = x
        # need to copy toAppend due to destroy_handler limitation
        if toAppend:
            o = out[0]
            for i in toAppend:
                o.append(_lessbroken_deepcopy(i))

    def __str__(self):
        return self.__class__.__name__

    # DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
    # (The underscores keep theano from picking this up as c_code.)
    def _c_code_(self, node, name, inp, out, sub):
        x_name, toAppend = inp[0], inp[1]
        output_name = out[0]
        fail = sub['fail']
        if not self.inplace:
            init = """
            %(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
            """ % locals()
        else:
            init = """
            %(output_name)s =  %(x_name)s;
            """ % locals()
        return init + """
        int i =0;
        int length = PyList_GET_SIZE((PyObject*) %(toAppend)s);
        if(%(output_name)s==NULL){
            %(fail)s
        };
        for(i; i < length; i++){
            if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) PyList_GetItem((PyObject*) %(toAppend)s,i))==-1){
                %(fail)s
            };
        }
        Py_INCREF(%(output_name)s);
        """ % locals()

    # Also disabled via the trailing underscore, matching _c_code_.
    def c_code_cache_version_(self):
        return (1,)

extend = Extend()
"""
Append all elements of a list at the end of another list.
Parameters
----------
x
    The typed list to extend.
toAppend
    The typed list that will be added at the end of `x`.
"""
class Insert(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if self.inplace:
            # Output is the input list mutated in place.
            self.destroy_map = {0: [0]}
            # TODO: make destroy_handler support having views and
            # destroyed version of multiple inputs.
            # self.view_map = {0: [2]}
        else:
            # TODO: make destroy_handler support multiple view
            # self.view_map = {0: [0, 2]}
            self.view_map = {0: [0]}

    def make_node(self, x, index, toInsert):
        # `index` may be a Python int (wrapped as an int64 constant) or a
        # symbolic int64 scalar.
        assert isinstance(x.type, TypedListType)
        assert x.ttype == toInsert.type
        if not isinstance(index, Variable):
            index = T.constant(index, ndim=0, dtype='int64')
        else:
            assert index.dtype == 'int64'
            assert isinstance(index, T.TensorVariable) and index.ndim == 0
        return Apply(self, [x, index, toInsert], [x.type()])

    def perform(self, node, inputs, outputs):
        (x, index, toInsert) = inputs
        (out,) = outputs
        if not self.inplace:
            out[0] = list(x)
        else:
            out[0] = x
        # need to copy toAppend due to destroy_handler limitation
        toInsert = _lessbroken_deepcopy(toInsert)
        out[0].insert(index, toInsert)

    def __str__(self):
        return self.__class__.__name__

    # DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend().
    # (The underscores keep theano from picking this up as c_code.)
    def _c_code_(self, node, name, inp, out, sub):
        x_name, index, toInsert = inp[0], inp[1], inp[2]
        output_name = out[0]
        fail = sub['fail']
        if not self.inplace:
            init = """
            %(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
            """ % locals()
        else:
            init = """
            %(output_name)s =  %(x_name)s;
            """ % locals()
        return init + """
        if(%(output_name)s==NULL){
            %(fail)s
        };
        if(PyList_Insert((PyObject*) %(output_name)s, *((npy_int64 *) PyArray_DATA(%(index)s)), (PyObject*) %(toInsert)s)==-1){
            %(fail)s
        };
        Py_INCREF(%(output_name)s);
        """ % locals()

    def c_code_cache_version(self):
        return (1,)

insert = Insert()
"""
Insert an element at an index in a typed list.
Parameters
----------
x
    The typed list to modify.
index
    The index where to put the new element in `x`.
toInsert
    The new element to insert.
"""
class Remove(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if self.inplace:
            # In-place: the output overwrites input 0.
            self.destroy_map = {0: [0]}
        else:
            # Out-of-place: the output is still declared a view of input 0.
            self.view_map = {0: [0]}

    def make_node(self, x, toRemove):
        assert isinstance(x.type, TypedListType)
        assert x.ttype == toRemove.type
        return Apply(self, [x, toRemove], [x.type()])

    def perform(self, node, inputs, outputs):
        (x, toRemove) = inputs
        (out,) = outputs
        out[0] = x if self.inplace else list(x)
        # Comparing through `values_eq` instead of `==` sidesteps the
        # "truth value of an array with more than one element is ambiguous"
        # ValueError raised when the list elements are ndarrays.
        values_eq = node.inputs[0].ttype.values_eq
        for position in range(len(out[0])):
            if values_eq(out[0][position], toRemove):
                del out[0][position]
                break

    def __str__(self):
        return type(self).__name__


remove = Remove()
"""Remove an element from a typed list.
Parameters
----------
x
    The typed list to be changed.
toRemove
    An element to be removed from the typed list.
    We only remove the first instance.
Notes
-----
Python implementation of remove doesn't work when we want to remove an ndarray
from a list. This implementation works in that case.
"""
class Reverse(Op):
    # See doc in instance of this Op after the class definition.
    # With `inplace=True` the op mutates input 0 (`destroy_map`); otherwise the
    # output is declared a view of input 0 (`view_map`).
    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if self.inplace:
            self.destroy_map = {0: [0]}
        else:
            self.view_map = {0: [0]}

    def make_node(self, x):
        assert isinstance(x.type, TypedListType)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inp, outputs):
        (out,) = outputs
        if not self.inplace:
            # Work on a shallow copy so the input list is left untouched.
            out[0] = list(inp[0])
        else:
            out[0] = inp[0]
        out[0].reverse()

    def __str__(self):
        return self.__class__.__name__

    def c_code(self, node, name, inp, out, sub):
        # NOTE: the local names below are interpolated into the C templates
        # through `% locals()`, so they must not be renamed.
        x_name = inp[0]
        output_name = out[0]
        fail = sub['fail']
        if not self.inplace:
            # Out-of-place: start from a shallow copy (slice) of the input list.
            init = """
%(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;
""" % locals()
        else:
            init = """
%(output_name)s = %(x_name)s;
""" % locals()
        return init + """
if(%(output_name)s==NULL){
%(fail)s
};
if(PyList_Reverse((PyObject*) %(output_name)s)==-1){
%(fail)s
};
Py_INCREF(%(output_name)s);
""" % locals()

    def c_code_cache_version(self):
        return (1,)


reverse = Reverse()
"""
Reverse the order of a typed list.
Parameters
----------
x
    The typed list to be reversed.
"""
class Index(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ()

    def make_node(self, x, elem):
        assert isinstance(x.type, TypedListType)
        assert x.ttype == elem.type
        return Apply(self, [x, elem], [T.scalar()])

    def perform(self, node, inputs, outputs):
        '''
        Elements are compared with `ttype.values_eq` rather than `==` so that
        ndarray elements do not raise "The truth value of an array with more
        than one element is ambiguous".
        '''
        (x, elem) = inputs
        (out,) = outputs
        values_eq = node.inputs[0].ttype.values_eq
        for position, value in enumerate(x):
            if values_eq(value, elem):
                out[0] = numpy.asarray(position, dtype=theano.config.floatX)
                break

    def __str__(self):
        return type(self).__name__


index_ = Index()
class Count(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ()

    def make_node(self, x, elem):
        assert isinstance(x.type, TypedListType)
        assert x.ttype == elem.type
        return Apply(self, [x, elem], [T.scalar()])

    def perform(self, node, inputs, outputs):
        '''
        Elements are compared with `ttype.values_eq` rather than `==` so that
        ndarray elements do not raise "The truth value of an array with more
        than one element is ambiguous".
        '''
        (x, elem) = inputs
        (out,) = outputs
        values_eq = node.inputs[0].ttype.values_eq
        matches = sum(1 for value in x if values_eq(value, elem))
        out[0] = numpy.asarray(matches, dtype=theano.config.floatX)

    def __str__(self):
        return type(self).__name__


count = Count()
"""
Count the number of times an element is in the typed list.
Parameters
----------
x
    The typed list to look into.
elem
    The element we want to count in list.
    The elements are compared with equals.
Notes
-----
Python implementation of count doesn't work when we want to count an ndarray
from a list. This implementation works in that case.
"""
class Length(Op):
    # See doc in instance of this Op after the class definition.
    __props__ = ()

    def make_node(self, x):
        assert isinstance(x.type, TypedListType)
        # The length is returned as a 0-d int64 tensor.
        return Apply(self, [x], [T.scalar(dtype='int64')])

    def perform(self, node, x, outputs):
        (out,) = outputs
        out[0] = numpy.asarray(len(x[0]), 'int64')

    def __str__(self):
        return self.__class__.__name__

    def c_code(self, node, name, inp, out, sub):
        # NOTE: the local names below are interpolated into the C template
        # through `% locals()`, so they must not be renamed (`fail` is unused
        # by the template but kept for uniformity with the other Ops).
        x_name = inp[0]
        output_name = out[0]
        fail = sub['fail']
        return """
if(!%(output_name)s)
%(output_name)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(output_name)s))[0]=PyList_Size((PyObject*)%(x_name)s);
Py_INCREF(%(output_name)s);
""" % locals()

    def c_code_cache_version(self):
        return (1,)


length = Length()
"""
Returns the size of a list.
Parameters
----------
x
    Typed list.
"""
class MakeList(Op):
    __props__ = ()

    def make_node(self, a):
        assert isinstance(a, (tuple, list))
        # Promote every raw value to a Theano variable.
        variables = [
            elem
            if isinstance(elem, theano.gof.Variable)
            else theano.tensor.as_tensor_variable(elem)
            for elem in a
        ]
        if not all(variables[0].type == elem.type for elem in variables):
            raise TypeError(
                "MakeList need all input variable to be of the same type.")
        output = theano.typed_list.TypedListType(variables[0].type)()
        return Apply(self, variables, [output])

    def perform(self, node, inputs, outputs):
        (out,) = outputs
        # We need to make sure that we don't get a view on our inputs
        out[0] = list(map(_lessbroken_deepcopy, inputs))


make_list = MakeList()
"""
Build a Python list from those Theano variable.
Parameters
----------
a : tuple/list of Theano variable
Notes
-----
All Theano variables must have the same type.
"""
| [
"theano.tensor.constant",
"theano.compile.debugmode._lessbroken_deepcopy",
"theano.tensor.type_other.SliceType",
"numpy.asarray",
"theano.gof.Apply",
"theano.tensor.as_tensor_variable",
"theano.tensor.scalar",
"theano.typed_list.TypedListType"
] | [((4437, 4467), 'theano.compile.debugmode._lessbroken_deepcopy', '_lessbroken_deepcopy', (['toAppend'], {}), '(toAppend)\n', (4457, 4467), False, 'from theano.compile.debugmode import _lessbroken_deepcopy\n'), ((9291, 9321), 'theano.compile.debugmode._lessbroken_deepcopy', '_lessbroken_deepcopy', (['toInsert'], {}), '(toInsert)\n', (9311, 9321), False, 'from theano.compile.debugmode import _lessbroken_deepcopy\n'), ((15310, 15359), 'numpy.asarray', 'numpy.asarray', (['out[0]'], {'dtype': 'theano.config.floatX'}), '(out[0], dtype=theano.config.floatX)\n', (15323, 15359), False, 'import numpy\n'), ((17326, 17347), 'theano.gof.Apply', 'Apply', (['self', 'a2', '[tl]'], {}), '(self, a2, [tl])\n', (17331, 17347), False, 'from theano.gof import Apply, Constant, Op, Variable\n'), ((8768, 8808), 'theano.tensor.constant', 'T.constant', (['index'], {'ndim': '(0)', 'dtype': '"""int64"""'}), "(index, ndim=0, dtype='int64')\n", (8778, 8808), True, 'from theano import tensor as T\n'), ((17264, 17307), 'theano.typed_list.TypedListType', 'theano.typed_list.TypedListType', (['a2[0].type'], {}), '(a2[0].type)\n', (17295, 17307), False, 'import theano\n'), ((17508, 17533), 'theano.compile.debugmode._lessbroken_deepcopy', '_lessbroken_deepcopy', (['inp'], {}), '(inp)\n', (17528, 17533), False, 'from theano.compile.debugmode import _lessbroken_deepcopy\n'), ((2029, 2069), 'theano.tensor.constant', 'T.constant', (['index'], {'ndim': '(0)', 'dtype': '"""int64"""'}), "(index, ndim=0, dtype='int64')\n", (2039, 2069), True, 'from theano import tensor as T\n'), ((13921, 13931), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (13929, 13931), True, 'from theano import tensor as T\n'), ((14391, 14435), 'numpy.asarray', 'numpy.asarray', (['y'], {'dtype': 'theano.config.floatX'}), '(y, dtype=theano.config.floatX)\n', (14404, 14435), False, 'import numpy\n'), ((14801, 14811), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (14809, 14811), True, 'from theano import tensor as T\n'), 
((16014, 16037), 'theano.tensor.scalar', 'T.scalar', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (16022, 16037), True, 'from theano import tensor as T\n'), ((17019, 17057), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['elem'], {}), '(elem)\n', (17051, 17057), False, 'import theano\n'), ((1908, 1919), 'theano.tensor.type_other.SliceType', 'SliceType', ([], {}), '()\n', (1917, 1919), False, 'from theano.tensor.type_other import SliceType\n'), ((6685, 6708), 'theano.compile.debugmode._lessbroken_deepcopy', '_lessbroken_deepcopy', (['i'], {}), '(i)\n', (6705, 6708), False, 'from theano.compile.debugmode import _lessbroken_deepcopy\n')] |
################################################################################
# skforecast #
# #
# This work by <NAME> is licensed under a Creative Commons #
# Attribution 4.0 International License. #
################################################################################
# coding=utf-8
import typing
from typing import Union, Dict, List, Tuple
import warnings
import logging
import numpy as np
import pandas as pd
import sklearn
import tqdm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
# Configure the root logger once at import time so library log messages
# (INFO and above) are visible with a timestamped format.
logging.basicConfig(
    format='%(asctime)-5s %(name)-10s %(levelname)-5s %(message)s',
    level=logging.INFO,
)
################################################################################
# ForecasterAutoregCustom #
################################################################################
class ForecasterAutoregCustom():
'''
This class turns any regressor compatible with the scikit-learn API into a
recursive (multi-step) forecaster with a custom function to create predictors.
Parameters
----------
regressor : any regressor compatible with the scikit-learn API
An instance of a regressor compatible with the scikit-learn API.
fun_predictors: Callable
Function that takes a time series window as an argument and returns an
`np.array` with the predictors associated with that window.
window_size: int
Size of the window needed by `fun_predictors` to create the predictors.
Attributes
----------
regressor : regressor compatible with the scikit-learn API
An instance of a regressor compatible with the scikit-learn API.
fun_predictors: Callable
Function that takes a time series window as an argument and returns an
`np.array` with the predictors associated with that window.
window_size: int
Size of the window needed by `fun_predictors` to create the predictors.
last_window : 1D np.ndarray
Last time window the forecaster has seen when trained. It stores the
values needed to calculate the predictors for the next `step` after the
training data.
included_exog : bool
If the forecaster has been trained using exogenous variable/s.
exog_type : type
Type used for the exogenous variable/s.
exog_shape : tuple
Shape of exog used in training.
in_sample_residuals: np.ndarray
Residuals of the model when predicting training data. Only stored up to
1000 values.
out_sample_residuals: np.ndarray
Residuals of the model when predicting non training data. Only stored
up to 1000 values.
fitted: Bool
Tag to identify if the estimator is fitted.
'''
def __init__(self, regressor, fun_predictors: callable, window_size: int) -> None:
self.regressor = regressor
self.create_predictors = fun_predictors
self.window_size = window_size
self.last_window = None
self.included_exog = False
self.exog_type = None
self.exog_shape = None
self.in_sample_residuals = None
self.out_sample_residuals = None
self.fitted = False
if not isinstance(window_size, int):
raise Exception(f'`window_size` must be int, got {type(window_size)}')
if not callable(fun_predictors):
raise Exception(f'`fun_predictors` must be callable, got {type(fun_predictors)}')
def __repr__(self) -> str:
'''
Information displayed when a ForecasterAutoregCustom object is printed.
'''
info = "=======================" \
+ "ForecasterAutoregCustom" \
+ "=======================" \
+ "\n" \
+ "Regressor: " + str(self.regressor) \
+ "\n" \
+ "Predictors created with: " + str(self.create_predictors.__name__) \
+ "\n" \
+ "Window size: " + str(self.window_size) \
+ "\n" \
+ "Exogenous variable: " + str(self.included_exog) + ', ' + str(self.exog_type) \
+ "\n" \
+ "Parameters: " + str(self.regressor.get_params())
return info
def create_train_X_y(self, y: Union[np.ndarray, pd.Series],
exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None
) -> Tuple[np.array, np.array]:
'''
Create training matrices X, y
Parameters
----------
y : 1D np.ndarray, pd.Series
Training time series.
exog : np.ndarray, pd.Series, pd.DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
Returns
-------
X_train : 2D np.ndarray, shape (len(y) - self.max_lag, len(self.lags))
2D array with the training values (predictors).
y_train : 1D np.ndarray, shape (len(y) - self.max_lag,)
Values (target) of the time series related to each row of `X_train`.
'''
self._check_y(y=y)
y = self._preproces_y(y=y)
if exog is not None:
self._check_exog(exog=exog)
exog = self._preproces_exog(exog=exog)
self.included_exog = True
self.exog_shape = exog.shape
if exog.shape[0] != len(y):
raise Exception(
f"`exog` must have same number of samples as `y`"
)
if len(y) - self.window_size < 1:
raise Exception(
f'`y` must have as many values as the windows_size needed by {self.create_predictors.__name__}.'
f'For this Forecaster the minimum lenght is {self.window_size + 1}'
)
X_train = []
y_train = []
for i in range(len(y) - self.window_size):
train_index = np.arange(i, self.window_size + i)
test_index = self.window_size + i
X_train.append(self.create_predictors(y=y[train_index]))
y_train.append(y[test_index])
X_train = np.vstack(X_train)
y_train = np.array(y_train)
if np.isnan(X_train).any():
raise Exception(
f"`create_predictors()` is returning `NaN` values."
)
if exog is not None:
# The first `self.window_size` positions have to be removed from
# exog since they are not in X_train.
X_train = np.column_stack((X_train, exog[self.window_size:,]))
return X_train, y_train
def fit(self, y: Union[np.ndarray, pd.Series],
exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None) -> None:
'''
Training ForecasterAutoregCustom
Parameters
----------
y : 1D np.ndarray, pd.Series
Training time series.
exog : np.ndarray, pd.Series, pd.DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
Returns
-------
self : ForecasterAutoregCustom
Trained ForecasterAutoregCustom
'''
# Reset values in case the forecaster has already been fitted before.
self.included_exog = False
self.exog_type = None
self.exog_shape = None
self._check_y(y=y)
y = self._preproces_y(y=y)
if exog is not None:
self._check_exog(exog=exog)
self.exog_type = type(exog)
exog = self._preproces_exog(exog=exog)
self.included_exog = True
self.exog_shape = exog.shape
if exog.shape[0] != len(y):
raise Exception(
f"`exog` must have same number of samples as `y`"
)
if len(y) - self.window_size < 1:
raise Exception(
f'`y` must have as many values as the windows_size needed by {self.create_predictors.__name__}.'
f'For this Forecaster the minimum lenght is {self.window_size + 1}'
)
X_train, y_train = self.create_train_X_y(y=y, exog=exog)
self.regressor.fit(X=X_train, y=y_train)
self.fitted = True
residuals = y_train - self.regressor.predict(X_train)
if len(residuals) > 1000:
# Only up to 1000 residuals are stored
residuals = np.random.choice(a=residuals, size=1000, replace=False)
self.in_sample_residuals = residuals
# The last time window of training data is stored so that predictors in
# the first iteration of `predict()` can be calculated.
self.last_window = y_train.flatten()[-self.window_size:]
    def predict(self, steps: int, last_window: Union[np.ndarray, pd.Series]=None,
                exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None) -> np.ndarray:
        '''
        Iterative process in which, each prediction, is used as a predictor
        for the next step.
        Parameters
        ----------
        steps : int
            Number of future steps predicted.
        last_window : 1D np.ndarray, pd.Series, default `None`
            Values of the series used to create the predictors need in the first
            iteration of predictiont (t + 1).
            If `last_window = None`, the values stored in` self.last_window` are
            used to calculate the initial predictors, and the predictions start
            right after training data.
        exog : np.ndarray, pd.Series, pd.DataFrame, default `None`
            Exogenous variable/s included as predictor/s.
        Returns
        -------
        predicciones : 1D np.array, shape (steps,)
            Values predicted.
        '''
        # --- Guard clauses: model must be fitted, `steps` positive, and the
        # presence of `exog` must match how the model was trained. ---
        if not self.fitted:
            raise Exception(
                'This Forecaster instance is not fitted yet. Call `fit` with appropriate arguments before using this it.'
            )
        if steps < 1:
            raise Exception(
                f"`steps` must be integer greater than 0. Got {steps}."
            )
        if exog is None and self.included_exog:
            raise Exception(
                f"Forecaster trained with exogenous variable/s. "
                f"Same variable/s must be provided in `predict()`."
            )
        if exog is not None and not self.included_exog:
            raise Exception(
                f"Forecaster trained without exogenous variable/s. "
                f"`exog` must be `None` in `predict()`."
            )
        if exog is not None:
            # Validate against the type/shape seen during training and ensure
            # one exog row is available per predicted step.
            self._check_exog(
                exog=exog, ref_type=self.exog_type, ref_shape=self.exog_shape
            )
            exog = self._preproces_exog(exog=exog)
            if exog.shape[0] < steps:
                raise Exception(
                    f"`exog` must have at least as many values as `steps` predicted."
                )
        if last_window is not None:
            self._check_last_window(last_window=last_window)
            last_window = self._preproces_last_window(last_window=last_window)
            if last_window.shape[0] < self.window_size:
                raise Exception(
                    f"`last_window` must have as many values as as needed to "
                    f"calculate the predictors ({self.window_size})."
                )
        else:
            # Default to the window stored at the end of training.
            last_window = self.last_window.copy()
        predictions = np.full(shape=steps, fill_value=np.nan)
        # Recursive multi-step loop: each prediction is fed back into the
        # window used to build the next step's predictors.
        for i in range(steps):
            X = self.create_predictors(y=last_window)
            if np.isnan(X).any():
                raise Exception(
                    f"`create_predictors()` is returning `NaN` values."
                )
            if exog is None:
                prediction = self.regressor.predict(X)
            else:
                prediction = self.regressor.predict(
                    np.column_stack((X, exog[i,].reshape(1, -1)))
                )
            predictions[i] = prediction.ravel()[0]
            # Update `last_window` values. The first position is discarded and
            # the new prediction is added at the end.
            last_window = np.append(last_window[1:], prediction)
        return predictions
    def _estimate_boot_interval(self, steps: int,
                                last_window: Union[np.ndarray, pd.Series]=None,
                                exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,
                                interval: list=[5, 95], n_boot: int=500,
                                in_sample_residuals: bool=True) -> np.ndarray:
        # NOTE(review): `interval` has a mutable default, but it is only read,
        # never mutated, so the shared default is harmless here.
        '''
        Iterative process in which, each prediction, is used as a predictor
        for the next step and bootstrapping is used to estimate prediction
        intervals. This method only returns prediction intervals.
        See predict_intervals() to calculate both, predictions and intervals.
        Parameters
        ----------
        steps : int
            Number of future steps predicted.
        last_window : 1D np.ndarray, pd.Series, default `None`
            Values of the series used to create the predictors need in the first
            iteration of predictiont (t + 1).
            If `last_window = None`, the values stored in` self.last_window` are
            used to calculate the initial predictors, and the predictions start
            right after training data.
        exog : np.ndarray, pd.Series, pd.DataFrame, default `None`
            Exogenous variable/s included as predictor/s.
        n_boot: int, default `500`
            Number of bootstrapping iterations used to estimate prediction
            intervals.
        interval: list, default `[5, 95]`
            Confidence of the prediction interval estimated. Sequence of percentiles
            to compute, which must be between 0 and 100 inclusive.
        in_sample_residuals: bool, default `True`
            If `True`, residuals from the training data are used as proxy of
            prediction error to create prediction intervals. If `False`, out of
            sample residuals are used. In the latter case, the user shoud have
            calculated and stored the residuals within the forecaster (see
            `set_out_sample_residuals()`).
        Returns
        -------
        predicction_interval : np.array, shape (steps, 2)
            Interval estimated for each prediction by bootstrapping.
        Notes
        -----
        More information about prediction intervals in forecasting:
        https://otexts.com/fpp2/prediction-intervals.html
        Forecasting: Principles and Practice (2nd ed)
        '''
        # --- Guard clauses mirroring `predict()` plus the residual source. ---
        if steps < 1:
            raise Exception(
                f"`steps` must be integer greater than 0. Got {steps}."
            )
        if not in_sample_residuals and self.out_sample_residuals is None:
            raise Exception(
                ('out_sample_residuals is empty. In order to estimate prediction '
                 'intervals using out of sample residuals, the user shoud have '
                 'calculated and stored the residuals within the forecaster (see'
                 '`set_out_sample_residuals()`.')
            )
        if exog is None and self.included_exog:
            raise Exception(
                f"Forecaster trained with exogenous variable/s. "
                f"Same variable/s must be provided in `predict()`."
            )
        if exog is not None and not self.included_exog:
            raise Exception(
                f"Forecaster trained without exogenous variable/s. "
                f"`exog` must be `None` in `predict()`."
            )
        if exog is not None:
            self._check_exog(
                exog=exog, ref_type=self.exog_type, ref_shape=self.exog_shape
            )
            exog = self._preproces_exog(exog=exog)
            if exog.shape[0] < steps:
                raise Exception(
                    f"`exog` must have at least as many values as `steps` predicted."
                )
        if last_window is not None:
            self._check_last_window(last_window=last_window)
            last_window = self._preproces_last_window(last_window=last_window)
            if last_window.shape[0] < self.window_size:
                raise Exception(
                    f"`last_window` must have as many values as as needed to "
                    f"calculate the predictors ({self.window_size})."
                )
        else:
            last_window = self.last_window.copy()
        # One column per bootstrap replicate, one row per forecast step.
        boot_predictions = np.full(
            shape=(steps, n_boot),
            fill_value=np.nan,
            dtype=float
        )
        for i in range(n_boot):
            # In each bootstraping iteration the initial last_window and exog
            # need to be restored.
            last_window_boot = last_window.copy()
            if exog is not None:
                exog_boot = exog.copy()
            else:
                exog_boot = None
            if in_sample_residuals:
                residuals = self.in_sample_residuals
            else:
                residuals = self.out_sample_residuals
            # Resample one residual per step (with replacement).
            sample_residuals = np.random.choice(
                a=residuals,
                size=steps,
                replace=True
            )
            for step in range(steps):
                # Predict one step ahead, perturb it with a sampled residual,
                # and feed the perturbed value back as the newest observation.
                prediction = self.predict(
                    steps=1,
                    last_window=last_window_boot,
                    exog=exog_boot
                )
                prediction_with_residual = prediction + sample_residuals[step]
                boot_predictions[step, i] = prediction_with_residual
                last_window_boot = np.append(
                    last_window_boot[1:],
                    prediction_with_residual
                )
                if exog is not None:
                    # Consume one exog row per simulated step.
                    exog_boot = exog_boot[1:]
        # Per-step percentiles across the bootstrap replicates.
        prediction_interval = np.percentile(boot_predictions, q=interval, axis=1)
        prediction_interval = prediction_interval.transpose()
        return prediction_interval
    def predict_interval(self, steps: int, last_window: Union[np.ndarray, pd.Series]=None,
                         exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,
                         interval: list=[5, 95], n_boot: int=500,
                         in_sample_residuals: bool=True) -> np.ndarray:
        # NOTE(review): `interval` has a mutable default, but it is only read,
        # never mutated, so the shared default is harmless here.
        '''
        Iterative process in which, each prediction, is used as a predictor
        for the next step and bootstrapping is used to estimate prediction
        intervals. Both, predictions and intervals, are returned.
        Parameters
        ----------
        steps : int
            Number of future steps predicted.
        last_window : 1D np.ndarray, pd.Series, default `None`
            Values of the series used to create the predictors need in the first
            iteration of predictiont (t + 1).
            If `last_window = None`, the values stored in` self.last_window` are
            used to calculate the initial predictors, and the predictions start
            right after training data.
        exog : np.ndarray, pd.Series, pd.DataFrame, default `None`
            Exogenous variable/s included as predictor/s.
        interval: list, default `[5, 95]`
            Confidence of the prediction interval estimated. Sequence of percentiles
            to compute, which must be between 0 and 100 inclusive.
        n_boot: int, default `500`
            Number of bootstrapping iterations used to estimate prediction
            intervals.
        in_sample_residuals: bool, default `True`
            If `True`, residuals from the training data are used as proxy of
            prediction error to create prediction intervals. If `False`, out of
            sample residuals are used. In the latter case, the user shoud have
            calculated and stored the residuals within the forecaster (see
            `set_out_sample_residuals()`).
        Returns
        -------
        predictions : np.array, shape (steps, 3)
            Values predicted by the forecaster and their estimated interval.
            Column 0 = predictions
            Column 1 = lower bound interval
            Column 2 = upper bound interval
        Notes
        -----
        More information about prediction intervals in forecasting:
        https://otexts.com/fpp2/prediction-intervals.html
        Forecasting: Principles and Practice (2nd ed)
        '''
        # --- Guard clauses mirroring `predict()` plus the residual source. ---
        if steps < 1:
            raise Exception(
                f"`steps` must be integer greater than 0. Got {steps}."
            )
        if not in_sample_residuals and self.out_sample_residuals is None:
            raise Exception(
                ('out_sample_residuals is empty. In order to estimate prediction '
                 'intervals using out of sample residuals, the user shoud have '
                 'calculated and stored the residuals within the forecaster (see'
                 '`set_out_sample_residuals()`.')
            )
        if exog is None and self.included_exog:
            raise Exception(
                f"Forecaster trained with exogenous variable/s. "
                f"Same variable/s must be provided in `predict()`."
            )
        if exog is not None and not self.included_exog:
            raise Exception(
                f"Forecaster trained without exogenous variable/s. "
                f"`exog` must be `None` in `predict()`."
            )
        if exog is not None:
            self._check_exog(
                exog=exog, ref_type=self.exog_type, ref_shape=self.exog_shape
            )
            exog = self._preproces_exog(exog=exog)
            if exog.shape[0] < steps:
                raise Exception(
                    f"`exog` must have at least as many values as `steps` predicted."
                )
        if last_window is not None:
            self._check_last_window(last_window=last_window)
            last_window = self._preproces_last_window(last_window=last_window)
            if last_window.shape[0] < self.window_size:
                raise Exception(
                    f"`last_window` must have as many values as as needed to "
                    f"calculate the predictors ({self.window_size})."
                )
        else:
            last_window = self.last_window.copy()
        # Since during predict() `last_window` and `exog` are modified, the
        # originals are stored to be used later
        last_window_original = last_window.copy()
        if exog is not None:
            exog_original = exog.copy()
        else:
            exog_original = exog
        # Point forecasts first, then the bootstrap interval around them.
        predictions = self.predict(
            steps=steps,
            last_window=last_window,
            exog=exog
        )
        predictions_interval = self._estimate_boot_interval(
            steps=steps,
            last_window=last_window_original,
            exog=exog_original,
            interval=interval,
            n_boot=n_boot,
            in_sample_residuals=in_sample_residuals
        )
        # Column 0: prediction, columns 1-2: lower/upper bounds.
        predictions = np.column_stack((predictions, predictions_interval))
        return predictions
def _check_y(self, y: Union[np.ndarray, pd.Series]) -> None:
'''
Raise Exception if `y` is not 1D `np.ndarray` or `pd.Series`.
Parameters
----------
y : np.ndarray, pd.Series
Time series values
'''
if not isinstance(y, (np.ndarray, pd.Series)):
raise Exception('`y` must be `1D np.ndarray` or `pd.Series`.')
elif isinstance(y, np.ndarray) and y.ndim != 1:
raise Exception(
f"`y` must be `1D np.ndarray` o `pd.Series`, "
f"got `np.ndarray` with {y.ndim} dimensions."
)
return
def _check_last_window(self, last_window: Union[np.ndarray, pd.Series]) -> None:
'''
Raise Exception if `last_window` is not 1D `np.ndarray` or `pd.Series`.
Parameters
----------
last_window : np.ndarray, pd.Series
Time series values
'''
if not isinstance(last_window, (np.ndarray, pd.Series)):
raise Exception('`last_window` must be `1D np.ndarray` or `pd.Series`.')
elif isinstance(last_window, np.ndarray) and last_window.ndim != 1:
raise Exception(
f"`last_window` must be `1D np.ndarray` o `pd.Series`, "
f"got `np.ndarray` with {last_window.ndim} dimensions."
)
return
def _check_exog(self, exog: Union[np.ndarray, pd.Series, pd.DataFrame],
ref_type: type=None, ref_shape: tuple=None) -> None:
'''
Raise Exception if `exog` is not `np.ndarray`, `pd.Series` or `pd.DataFrame`.
If `ref_shape` is provided, raise Exception if `ref_shape[1]` do not match
`exog.shape[1]` (number of columns).
Parameters
----------
exog : np.ndarray, pd.Series, pd.DataFrame
Exogenous variable/s included as predictor/s.
exog_type : type, default `None`
Type of reference for exog.
exog_shape : tuple, default `None`
Shape of reference for exog.
'''
if not isinstance(exog, (np.ndarray, pd.Series, pd.DataFrame)):
raise Exception('`exog` must be `np.ndarray`, `pd.Series` or `pd.DataFrame`.')
if isinstance(exog, np.ndarray) and exog.ndim > 2:
raise Exception(
f" If `exog` is `np.ndarray`, maximum allowed dim=2. "
f"Got {exog.ndim}."
)
if ref_type is not None:
if ref_type == pd.Series:
if isinstance(exog, pd.Series):
return
elif isinstance(exog, np.ndarray) and exog.ndim == 1:
return
elif isinstance(exog, np.ndarray) and exog.shape[1] == 1:
return
else:
raise Exception(
f"`exog` must be: `pd.Series`, `np.ndarray` with 1 dimension "
f"or `np.ndarray` with 1 column in the second dimension. "
f"Got `np.ndarray` with {exog.shape[1]} columns."
)
if ref_type == np.ndarray:
if exog.ndim == 1 and ref_shape[1] == 1:
return
elif exog.ndim == 1 and ref_shape[1] > 1:
raise Exception(
f"`exog` must have {ref_shape[1]} columns. "
f"Got `np.ndarray` with 1 dimension or `pd.Series`."
)
elif ref_shape[1] != exog.shape[1]:
raise Exception(
f"`exog` must have {ref_shape[1]} columns. "
f"Got `np.ndarray` with {exog.shape[1]} columns."
)
if ref_type == pd.DataFrame:
if ref_shape[1] != exog.shape[1]:
raise Exception(
f"`exog` must have {ref_shape[1]} columns. "
f"Got `pd.DataFrame` with {exog.shape[1]} columns."
)
return
def _preproces_y(self, y: Union[np.ndarray, pd.Series]) -> np.ndarray:
'''
Transforms `y` to 1D `np.ndarray` if it is `pd.Series`.
Parameters
----------
y :1D np.ndarray, pd.Series
Time series values
Returns
-------
y: 1D np.ndarray, shape(samples,)
'''
if isinstance(y, pd.Series):
return y.to_numpy(copy=True)
else:
return y
def _preproces_last_window(self, last_window: Union[np.ndarray, pd.Series]) -> np.ndarray:
'''
Transforms `last_window` to 1D `np.ndarray` if it is `pd.Series`.
Parameters
----------
last_window :1D np.ndarray, pd.Series
Time series values
Returns
-------
last_window: 1D np.ndarray, shape(samples,)
'''
if isinstance(last_window, pd.Series):
return last_window.to_numpy(copy=True)
else:
return last_window
def _preproces_exog(self, exog: Union[np.ndarray, pd.Series, pd.DataFrame]) -> np.ndarray:
'''
Transforms `exog` to `np.ndarray` if it is `pd.Series` or `pd.DataFrame`.
If 1D `np.ndarray` reshape it to (n_samples, 1)
Parameters
----------
exog : np.ndarray, pd.Series
Time series values
Returns
-------
exog: np.ndarray, shape(samples,)
'''
if isinstance(exog, pd.Series):
exog = exog.to_numpy(copy=True).reshape(-1, 1)
elif isinstance(exog, np.ndarray) and exog.ndim == 1:
exog = exog.reshape(-1, 1)
elif isinstance(exog, pd.DataFrame):
exog = exog.to_numpy(copy=True)
return exog
    def set_params(self, **params: dict) -> None:
        '''
        Set new values to the parameters of the scikit learn model stored in the
        ForecasterAutoregCustom.
        Parameters
        ----------
        params : dict
            Parameters values.
        Returns
        -------
        self
        '''
        # Delegates directly to the underlying scikit-learn estimator.
        self.regressor.set_params(**params)
def set_out_sample_residuals(self, residuals: np.ndarray, append: bool=True)-> None:
'''
Set new values to the attribute `out_sample_residuals`. Out of sample
residuals are meant to be calculated using observations that did not
participate in the training process.
Parameters
----------
params : 1D np.ndarray
Values of residuals. If len(residuals) > 1000, only a random sample
of 1000 values are stored.
append : bool, default `True`
If `True`, new residuals are added to the once already stored in the attribute
`out_sample_residuals`. Once the limit of 1000 values is reached, no more values
are appended. If False, `out_sample_residuals` is overwrited with the new residuals.
Returns
-------
self
'''
if not isinstance(residuals, np.ndarray):
raise Exception(
f"`residuals` argument must be `1D np.ndarray`. Got {type(residuals)}"
)
if len(residuals) > 1000:
residuals = np.random.choice(a=residuals, size=1000, replace=False)
if not append or self.out_sample_residuals is None:
self.out_sample_residuals = residuals
else:
free_space = max(0, 1000 - len(self.out_sample_residuals))
if len(residuals) < free_space:
self.out_sample_residuals = np.hstack((self.out_sample_residuals, residuals))
else:
self.out_sample_residuals = np.hstack((self.out_sample_residuals, residuals[:free_space]))
def get_coef(self) -> np.ndarray:
'''
Return estimated coefficients for the linear regression model stored in
the forecaster. Only valid when the forecaster has been trained using
as `regressor: `LinearRegression()`, `Lasso()` or `Ridge()`.
Parameters
----------
self
Returns
-------
coef : 1D np.ndarray
Value of the coefficients associated with each predictor.
Coefficients are aligned so that `coef[i]` is the value associated
with predictor i returned by `self.create_predictors`.
'''
valid_instances = (sklearn.linear_model._base.LinearRegression,
sklearn.linear_model._coordinate_descent.Lasso,
sklearn.linear_model._ridge.Ridge
)
if not isinstance(self.regressor, valid_instances):
warnings.warn(
('Only forecasters with `regressor` `LinearRegression()`, ' +
' `Lasso()` or `Ridge()` have coef.')
)
return
else:
coef = self.regressor.coef_
return coef
    def get_feature_importances(self) -> np.ndarray:
        '''
        Return the impurity-based feature importances of the model stored in
        the forecaster. Only valid when the forecaster was trained with
        `regressor=GradientBoostingRegressor()` or
        `regressor=RandomForestRegressor()`; otherwise a warning is emitted
        and `None` is returned.
        Parameters
        ----------
        self
        Returns
        -------
        feature_importances : 1D np.ndarray
            Impurity-based feature importances, aligned so that
            `feature_importances[i]` corresponds to predictor i returned by
            `self.create_predictors`.
        '''
        # Only tree-ensemble regressors expose `feature_importances_`.
        if not isinstance(self.regressor,
                          (sklearn.ensemble._forest.RandomForestRegressor,
                           sklearn.ensemble._gb.GradientBoostingRegressor)):
            warnings.warn(
                ('Only forecasters with `regressor=GradientBoostingRegressor()` '
                 'or `regressor=RandomForestRegressor`.')
            )
            return
        else:
            feature_importances = self.regressor.feature_importances_
return feature_importances | [
"logging.basicConfig",
"numpy.hstack",
"numpy.random.choice",
"numpy.column_stack",
"numpy.append",
"numpy.array",
"numpy.isnan",
"numpy.vstack",
"numpy.percentile",
"warnings.warn",
"numpy.full",
"numpy.arange"
] | [((801, 914), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)-5s %(name)-10s %(levelname)-5s %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)-5s %(name)-10s %(levelname)-5s %(message)s', level=logging.INFO\n )\n", (820, 914), False, 'import logging\n'), ((6907, 6925), 'numpy.vstack', 'np.vstack', (['X_train'], {}), '(X_train)\n', (6916, 6925), True, 'import numpy as np\n'), ((6944, 6961), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (6952, 6961), True, 'import numpy as np\n'), ((12645, 12684), 'numpy.full', 'np.full', ([], {'shape': 'steps', 'fill_value': 'np.nan'}), '(shape=steps, fill_value=np.nan)\n', (12652, 12684), True, 'import numpy as np\n'), ((17986, 18048), 'numpy.full', 'np.full', ([], {'shape': '(steps, n_boot)', 'fill_value': 'np.nan', 'dtype': 'float'}), '(shape=(steps, n_boot), fill_value=np.nan, dtype=float)\n', (17993, 18048), True, 'import numpy as np\n'), ((19735, 19786), 'numpy.percentile', 'np.percentile', (['boot_predictions'], {'q': 'interval', 'axis': '(1)'}), '(boot_predictions, q=interval, axis=1)\n', (19748, 19786), True, 'import numpy as np\n'), ((25393, 25445), 'numpy.column_stack', 'np.column_stack', (['(predictions, predictions_interval)'], {}), '((predictions, predictions_interval))\n', (25408, 25445), True, 'import numpy as np\n'), ((6686, 6720), 'numpy.arange', 'np.arange', (['i', '(self.window_size + i)'], {}), '(i, self.window_size + i)\n', (6695, 6720), True, 'import numpy as np\n'), ((7305, 7357), 'numpy.column_stack', 'np.column_stack', (['(X_train, exog[self.window_size:,])'], {}), '((X_train, exog[self.window_size:,]))\n', (7320, 7357), True, 'import numpy as np\n'), ((9434, 9489), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'residuals', 'size': '(1000)', 'replace': '(False)'}), '(a=residuals, size=1000, replace=False)\n', (9450, 9489), True, 'import numpy as np\n'), ((13404, 13442), 'numpy.append', 'np.append', (['last_window[1:]', 'prediction'], 
{}), '(last_window[1:], prediction)\n', (13413, 13442), True, 'import numpy as np\n'), ((18722, 18777), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'residuals', 'size': 'steps', 'replace': '(True)'}), '(a=residuals, size=steps, replace=True)\n', (18738, 18777), True, 'import numpy as np\n'), ((33250, 33305), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'residuals', 'size': '(1000)', 'replace': '(False)'}), '(a=residuals, size=1000, replace=False)\n', (33266, 33305), True, 'import numpy as np\n'), ((34783, 34899), 'warnings.warn', 'warnings.warn', (["('Only forecasters with `regressor` `LinearRegression()`, ' +\n ' `Lasso()` or `Ridge()` have coef.')"], {}), "('Only forecasters with `regressor` `LinearRegression()`, ' +\n ' `Lasso()` or `Ridge()` have coef.')\n", (34796, 34899), False, 'import warnings\n'), ((35931, 36057), 'warnings.warn', 'warnings.warn', (['"""Only forecasters with `regressor=GradientBoostingRegressor()` or `regressor=RandomForestRegressor`."""'], {}), "(\n 'Only forecasters with `regressor=GradientBoostingRegressor()` or `regressor=RandomForestRegressor`.'\n )\n", (35944, 36057), False, 'import warnings\n'), ((6982, 6999), 'numpy.isnan', 'np.isnan', (['X_train'], {}), '(X_train)\n', (6990, 6999), True, 'import numpy as np\n'), ((19437, 19494), 'numpy.append', 'np.append', (['last_window_boot[1:]', 'prediction_with_residual'], {}), '(last_window_boot[1:], prediction_with_residual)\n', (19446, 19494), True, 'import numpy as np\n'), ((33623, 33672), 'numpy.hstack', 'np.hstack', (['(self.out_sample_residuals, residuals)'], {}), '((self.out_sample_residuals, residuals))\n', (33632, 33672), True, 'import numpy as np\n'), ((33735, 33797), 'numpy.hstack', 'np.hstack', (['(self.out_sample_residuals, residuals[:free_space])'], {}), '((self.out_sample_residuals, residuals[:free_space]))\n', (33744, 33797), True, 'import numpy as np\n'), ((12786, 12797), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (12794, 12797), True, 'import numpy 
as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2018/05/18
# @Author : <NAME>
import datetime
import json
import cv2
import numpy as np
import time
import core
import os
from PIL import Image, ImageDraw
def transformation_points(src_img, src_points, dst_img, dst_points):
    """Warp `dst_img` into `src_img`'s frame with a similarity transform.

    Estimates the optimal rotation/scale/translation (orthogonal Procrustes
    via SVD) that maps `dst_points` onto `src_points`, then applies the
    inverse warp so the returned image has `src_img`'s size.

    NOTE(review): the `*` products below (`src_points.T * dst_points`,
    `u * vt`) rely on the operands being `np.matrix`, where `*` means
    matrix multiplication — presumably `core.face_points` returns
    landmark matrices; confirm before changing input types.
    """
    src_points = src_points.astype(np.float64)
    dst_points = dst_points.astype(np.float64)
    # print(src_points.shape)
    # print(dst_points)
    # Center both point sets on their centroids.
    c1 = np.mean(src_points, axis=0)
    c2 = np.mean(dst_points, axis=0)
    src_points -= c1
    dst_points -= c2
    # Normalize overall scale before solving for the rotation.
    s1 = np.std(src_points)
    s2 = np.std(dst_points)
    src_points /= s1
    dst_points /= s2
    u, s, vt = np.linalg.svd(src_points.T * dst_points)
    r = (u * vt).T
    # Assemble a 3x3 affine matrix: scaled rotation plus translation row.
    m = np.vstack([np.hstack(((s2 / s1) * r, c2.T - (s2 / s1) * r * c1.T)), np.matrix([0., 0., 1.])])
    output = cv2.warpAffine(dst_img, m[:2],
                            (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)
    return output
def tran_matrix(src_img, src_points, dst_img, dst_points):
    """Warp `dst_img` into `src_img`'s frame using an estimated homography.

    The homography maps `dst_points` onto `src_points`; only its first two
    rows are used, as `cv2.warpAffine` expects a 2x3 matrix.
    """
    homography = cv2.findHomography(dst_points, src_points)[0]
    width, height = src_img.shape[1], src_img.shape[0]
    return cv2.warpAffine(dst_img, homography[:2], (width, height),
                          borderMode=cv2.BORDER_TRANSPARENT,
                          flags=cv2.WARP_INVERSE_MAP)
def correct_color(img1, img2, landmark):
    """Match `img2`'s color distribution to `img1` via Gaussian-blur ratio.

    The blur kernel is sized from the inter-eye distance of `landmark`
    (0.4x, forced odd as GaussianBlur requires an odd kernel).
    """
    left_eye = np.mean(landmark[core.LEFT_EYE_POINTS], axis=0)
    right_eye = np.mean(landmark[core.RIGHT_EYE_POINTS], axis=0)
    kernel = int(0.4 * np.linalg.norm(left_eye - right_eye))
    if kernel % 2 == 0:
        kernel += 1
    img1_blur = cv2.GaussianBlur(img1, (kernel, kernel), 0)
    img2_blur = cv2.GaussianBlur(img2, (kernel, kernel), 0)
    # Avoid division by ~zero in dark regions of the blurred divisor.
    img2_blur += (128 * (img2_blur <= 1.0)).astype(img2_blur.dtype)
    return img2.astype(np.float64) * img1_blur.astype(np.float64) / img2_blur.astype(np.float64)
def tran_src(src_img, src_points, dst_points, face_area=None):
    """Re-triangulate and warp `src_img` so its landmarks move toward `dst_points`.

    Builds extended point lists (face landmarks + face-area rectangle +
    image-border rectangle), prunes jaw points that fall outside the convex
    hull of the combined jaw set, then warps every resulting triangle with
    `core.affine_triangle`. Returns the warped image.

    NOTE(review): the `print` below looks like leftover debugging output.
    """
    # print(1111111)
    print(src_img.shape)
    jaw = core.JAW_END
    # Anchor points: landmarks, the detected face rectangle, and the frame.
    dst_list = dst_points \
               + core.matrix_rectangle(face_area[0], face_area[1], face_area[2], face_area[3]) \
               + core.matrix_rectangle(0, 0, src_img.shape[1], src_img.shape[0])
    src_list = src_points \
               + core.matrix_rectangle(face_area[0], face_area[1], face_area[2], face_area[3]) \
               + core.matrix_rectangle(0, 0, src_img.shape[1], src_img.shape[0])
    # Interleave dst/src jaw points; even hull indices are dst, odd are src.
    jaw_points = []
    for i in range(0, jaw):
        # print(i)
        jaw_points.append(dst_list[i])
        jaw_points.append(src_list[i])
    warp_jaw = cv2.convexHull(np.array(jaw_points), returnPoints=False)
    warp_jaw = warp_jaw.tolist()
    for i in range(0, len(warp_jaw)):
        warp_jaw[i] = warp_jaw[i][0]
    warp_jaw.sort()
    if len(warp_jaw) <= jaw:
        # Fewer hull points than jaw landmarks: drop the excess and
        # substitute hull points in their place.
        dst_list = dst_list[jaw - len(warp_jaw):]
        src_list = src_list[jaw - len(warp_jaw):]
        for i in range(0, len(warp_jaw)):
            dst_list[i] = jaw_points[int(warp_jaw[i])]
            src_list[i] = jaw_points[int(warp_jaw[i])]
    else:
        # More hull points than jaw landmarks: collapse dst/src pairs that
        # both made the hull, keeping one representative per landmark.
        for i in range(0, jaw):
            if len(warp_jaw) > jaw and warp_jaw[i] == 2 * i and warp_jaw[i + 1] == 2 * i + 1:
                warp_jaw.remove(2 * i)
            dst_list[i] = jaw_points[int(warp_jaw[i])]
    dt = core.measure_triangle(src_img, dst_list,src_points,dst_points)
    res_img = np.zeros(src_img.shape, dtype=src_img.dtype)
    for i in range(0, len(dt)):
        t_src = []
        t_dst = []
        for j in range(0, 3):
            t_src.append(src_list[dt[i][j]])
            t_dst.append(dst_list[dt[i][j]])
        # Skip degenerate triangles (duplicated vertices / wrong size).
        if(checkLine(t_src) or checkLine(t_dst)):
            # print("not checked")
            continue
        else:
            core.affine_triangle(src_img, res_img, t_src, t_dst)
    return res_img
def merge_img(src_img, dst_img, dst_matrix, dst_points, k_size=None, mat_multiple=None):
    """Seamlessly clone the face region of `dst_img` onto `src_img`.

    A mask is built from the convex hulls of `core.OVERLAY_POINTS` groups,
    optionally shrunk/grown by `mat_multiple` and softened by a box blur of
    `k_size`, then blended with `cv2.seamlessClone` at the face center.
    """
    mask = np.zeros(src_img.shape, dtype=src_img.dtype)
    for group in core.OVERLAY_POINTS:
        hull = cv2.convexHull(dst_matrix[group])
        cv2.fillConvexPoly(mask, hull, (255, 255, 255))
    x, y, w, h = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
    center = (x + int(w / 2), y + int(h / 2))
    if mat_multiple:
        rotation = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        mask = cv2.warpAffine(mask, rotation, (mask.shape[1], mask.shape[0]))
    if k_size:
        mask = cv2.blur(mask, k_size, center)
    return cv2.seamlessClone(np.uint8(dst_img), src_img, mask, center, cv2.NORMAL_CLONE)
def drawLine(src_img, points):
    """Draw `points` as a red, 5px-wide polyline and return a PIL Image."""
    canvas = Image.fromarray(src_img.astype(np.uint8))
    pen = ImageDraw.Draw(canvas)
    pen.line(points, width=5, fill=(255, 0, 0))
    return canvas
def morph_img(src_img, src_points, dst_img, dst_points, alpha=0.5):
    """Morph `src_img` toward `dst_img`, blending geometry and pixels.

    Each landmark is linearly interpolated between source and destination
    by `alpha` (0 = pure source shape, 1 = pure destination shape), then
    every Delaunay triangle is warped and cross-dissolved with
    `core.morph_triangle`.
    """
    src_img = src_img.astype(np.float32)
    dst_img = dst_img.astype(np.float32)
    res_img = np.zeros(src_img.shape, src_img.dtype)
    # Interpolate every landmark between the two faces.
    morph_points = [
        ((1 - alpha) * src_points[i][0] + alpha * dst_points[i][0],
         (1 - alpha) * src_points[i][1] + alpha * dst_points[i][1])
        for i in range(len(src_points))
    ]
    dt = core.measure_triangle(src_img, morph_points, src_points, dst_points)
    for i in range(len(dt)):
        t1 = [src_points[dt[i][j]] for j in range(3)]
        t2 = [dst_points[dt[i][j]] for j in range(3)]
        t = [morph_points[dt[i][j]] for j in range(3)]
        # Skip degenerate triangles in any of the three geometries.
        if checkLine(t) or checkLine(t1) or checkLine(t2):
            continue
        core.morph_triangle(src_img, dst_img, res_img, t1, t2, t, alpha, i)
    return res_img
def checkLine(t):
    """Return True when `t` is not a usable triangle.

    A triangle is rejected when it does not have exactly three vertices or
    when any two of its vertices coincide (a degenerate, zero-area shape).
    """
    if len(t) != 3:
        return True
    a, b, c = t
    return a == b or b == c or a == c
def face_merge(
        src_img,
        dst_img,
        out_img,
        alpha=0.75,
        k_size=(10,5),
        mat_multiple=0.5
):
    """Full face-swap pipeline: align, morph and blend `dst_img` into `src_img`.

    Steps: detect landmarks on both images, similarity-align the target
    face, morph the geometry by `alpha`, warp the source toward the morphed
    landmarks, and seamless-clone the result. The final image is written to
    `out_img`; the error code from the last landmark detection is returned.
    """
    src_matrix, src_points, src_faces,err = core.face_points(src_img)
    ## Pass the face data read on the first target lookup straight through,
    ## to cut down on calls to the face-recognition API.
    dst_matrix, dst_points, dst_faces,err = core.face_points(dst_img)
    # Paths were given instead of arrays: load the images from disk.
    if not (isinstance(src_img,np.ndarray) and isinstance(dst_img,np.ndarray)):
        src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
        dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)
    dst_img = transformation_points(src_img=src_img, src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img, dst_points=dst_matrix[core.FACE_POINTS])
    # Alignment step: re-detect landmarks on the warped target.
    trans_file = 'images/' + "trans"+ '.jpg'
    cv2.imwrite(trans_file, dst_img)
    _, dst_points, trans_faces, err = core.face_points(dst_img)
    dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)
    # Morph/blend step.
    # morph_file = 'images/' + "merge" + '.jpg'
    # cv2.imwrite(morph_file, dst_img)
    dst_matrix, dst_points, morph_faces,err = core.face_points(dst_img)
    # `src_faces` may be a dict (single face) or a list of boxes; in the
    # list case the last detected box is used.
    if isinstance(src_faces,dict):
        src_img = tran_src(src_img, src_points, dst_points,
                           [int(src_faces['x']), int(src_faces['y']), int(src_faces['width']),
                            int(src_faces['height'])])
    else:
        src_img = tran_src(src_img, src_points, dst_points, [int(src_faces[-1][0]),int(src_faces[-1][1]),int(src_faces[-1][2]),int(src_faces[-1][3])])
    # cv2.imwrite('images/' + "tran_src" + '.jpg',src_img)
    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size, mat_multiple)
    # Remove the temporary files.
    # os.remove(trans_file)
    # os.remove(morph_file)
    cv2.imwrite(out_img, dst_img)
    return err
def face_merge_ret(
        src_img,
        dst_img,
        out_img,
        alpha=0.75,
        k_size=(10,5),
        mat_multiple=0.5
):
    """Face-swap pipeline like `face_merge`, but returning the merged image.

    Bails out early and returns the (unmodified) `src_img` when landmark
    detection fails on either input. Intermediate images are written under
    `images/` and the final result to `out_img`.
    """
    src_matrix, src_points, src_faces,err = core.face_points(src_img)
    if(err != 0 or len(src_points) == 0):
        return src_img
    ## Pass the face data read on the first target lookup straight through,
    ## to cut down on calls to the face-recognition API.
    dst_matrix, dst_points, dst_faces,err = core.face_points(dst_img)
    if(err != 0 or len(dst_points) == 0):
        return src_img
    # Paths were given instead of arrays: load the images from disk.
    if not (isinstance(src_img,np.ndarray)):
        print("read")
        src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    if not (isinstance(dst_img,np.ndarray)):
        dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)
    dst_img = transformation_points(src_img=src_img, src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img, dst_points=dst_matrix[core.FACE_POINTS])
    # Alignment step: re-detect landmarks on the warped target.
    trans_file = 'images/' + "trans"+ '.jpg'
    cv2.imwrite(trans_file, dst_img)
    _, dst_points, trans_faces, err = core.face_points(dst_img)
    dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)
    # Morph/blend step.
    morph_file = 'images/' + "merge" + '.jpg'
    cv2.imwrite(morph_file, dst_img)
    dst_matrix, dst_points, morph_faces,err = core.face_points(dst_img)
    # `src_faces` may be a dict (single face) or a list of boxes; in the
    # list case the last detected box is used.
    if isinstance(src_faces,dict):
        src_img = tran_src(src_img, src_points, dst_points,
                           [int(src_faces['x']), int(src_faces['y']), int(src_faces['width']),
                            int(src_faces['height'])])
    else:
        src_img = tran_src(src_img, src_points, dst_points, [int(src_faces[-1][0]),int(src_faces[-1][1]),int(src_faces[-1][2]),int(src_faces[-1][3])])
    cv2.imwrite('images/' + "tran_src" + '.jpg',src_img)
    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size, mat_multiple)
    cv2.imwrite(out_img,dst_img)
return dst_img | [
"numpy.uint8",
"numpy.hstack",
"core.morph_triangle",
"numpy.array",
"PIL.ImageDraw.Draw",
"numpy.mean",
"core.affine_triangle",
"cv2.blur",
"cv2.warpAffine",
"core.matrix_rectangle",
"cv2.findHomography",
"core.measure_triangle",
"numpy.std",
"numpy.linalg.svd",
"cv2.getRotationMatrix2D... | [((421, 448), 'numpy.mean', 'np.mean', (['src_points'], {'axis': '(0)'}), '(src_points, axis=0)\n', (428, 448), True, 'import numpy as np\n'), ((458, 485), 'numpy.mean', 'np.mean', (['dst_points'], {'axis': '(0)'}), '(dst_points, axis=0)\n', (465, 485), True, 'import numpy as np\n'), ((539, 557), 'numpy.std', 'np.std', (['src_points'], {}), '(src_points)\n', (545, 557), True, 'import numpy as np\n'), ((567, 585), 'numpy.std', 'np.std', (['dst_points'], {}), '(dst_points)\n', (573, 585), True, 'import numpy as np\n'), ((645, 685), 'numpy.linalg.svd', 'np.linalg.svd', (['(src_points.T * dst_points)'], {}), '(src_points.T * dst_points)\n', (658, 685), True, 'import numpy as np\n'), ((821, 956), 'cv2.warpAffine', 'cv2.warpAffine', (['dst_img', 'm[:2]', '(src_img.shape[1], src_img.shape[0])'], {'borderMode': 'cv2.BORDER_TRANSPARENT', 'flags': 'cv2.WARP_INVERSE_MAP'}), '(dst_img, m[:2], (src_img.shape[1], src_img.shape[0]),\n borderMode=cv2.BORDER_TRANSPARENT, flags=cv2.WARP_INVERSE_MAP)\n', (835, 956), False, 'import cv2\n'), ((1125, 1167), 'cv2.findHomography', 'cv2.findHomography', (['dst_points', 'src_points'], {}), '(dst_points, src_points)\n', (1143, 1167), False, 'import cv2\n'), ((1181, 1319), 'cv2.warpAffine', 'cv2.warpAffine', (['dst_img', 'h[0][:2]', '(src_img.shape[1], src_img.shape[0])'], {'borderMode': 'cv2.BORDER_TRANSPARENT', 'flags': 'cv2.WARP_INVERSE_MAP'}), '(dst_img, h[0][:2], (src_img.shape[1], src_img.shape[0]),\n borderMode=cv2.BORDER_TRANSPARENT, flags=cv2.WARP_INVERSE_MAP)\n', (1195, 1319), False, 'import cv2\n'), ((1701, 1754), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img1', '(blur_amount, blur_amount)', '(0)'], {}), '(img1, (blur_amount, blur_amount), 0)\n', (1717, 1754), False, 'import cv2\n'), ((1771, 1824), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img2', '(blur_amount, blur_amount)', '(0)'], {}), '(img2, (blur_amount, blur_amount), 0)\n', (1787, 1824), False, 'import cv2\n'), ((3413, 3477), 
'core.measure_triangle', 'core.measure_triangle', (['src_img', 'dst_list', 'src_points', 'dst_points'], {}), '(src_img, dst_list, src_points, dst_points)\n', (3434, 3477), False, 'import core\n'), ((3491, 3535), 'numpy.zeros', 'np.zeros', (['src_img.shape'], {'dtype': 'src_img.dtype'}), '(src_img.shape, dtype=src_img.dtype)\n', (3499, 3535), True, 'import numpy as np\n'), ((4042, 4086), 'numpy.zeros', 'np.zeros', (['src_img.shape'], {'dtype': 'src_img.dtype'}), '(src_img.shape, dtype=src_img.dtype)\n', (4050, 4086), True, 'import numpy as np\n'), ((4767, 4791), 'PIL.Image.fromarray', 'Image.fromarray', (['src_img'], {}), '(src_img)\n', (4782, 4791), False, 'from PIL import Image, ImageDraw\n'), ((4803, 4821), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (4817, 4821), False, 'from PIL import Image, ImageDraw\n'), ((5100, 5138), 'numpy.zeros', 'np.zeros', (['src_img.shape', 'src_img.dtype'], {}), '(src_img.shape, src_img.dtype)\n', (5108, 5138), True, 'import numpy as np\n'), ((5461, 5529), 'core.measure_triangle', 'core.measure_triangle', (['src_img', 'morph_points', 'src_points', 'dst_points'], {}), '(src_img, morph_points, src_points, dst_points)\n', (5482, 5529), False, 'import core\n'), ((6314, 6339), 'core.face_points', 'core.face_points', (['src_img'], {}), '(src_img)\n', (6330, 6339), False, 'import core\n'), ((6431, 6456), 'core.face_points', 'core.face_points', (['dst_img'], {}), '(dst_img)\n', (6447, 6456), False, 'import core\n'), ((6901, 6933), 'cv2.imwrite', 'cv2.imwrite', (['trans_file', 'dst_img'], {}), '(trans_file, dst_img)\n', (6912, 6933), False, 'import cv2\n'), ((6973, 6998), 'core.face_points', 'core.face_points', (['dst_img'], {}), '(dst_img)\n', (6989, 6998), False, 'import core\n'), ((7216, 7241), 'core.face_points', 'core.face_points', (['dst_img'], {}), '(dst_img)\n', (7232, 7241), False, 'import core\n'), ((7875, 7904), 'cv2.imwrite', 'cv2.imwrite', (['out_img', 'dst_img'], {}), '(out_img, dst_img)\n', (7886, 7904), 
False, 'import cv2\n'), ((8142, 8167), 'core.face_points', 'core.face_points', (['src_img'], {}), '(src_img)\n', (8158, 8167), False, 'import core\n'), ((8330, 8355), 'core.face_points', 'core.face_points', (['dst_img'], {}), '(dst_img)\n', (8346, 8355), False, 'import core\n'), ((8911, 8943), 'cv2.imwrite', 'cv2.imwrite', (['trans_file', 'dst_img'], {}), '(trans_file, dst_img)\n', (8922, 8943), False, 'import cv2\n'), ((8983, 9008), 'core.face_points', 'core.face_points', (['dst_img'], {}), '(dst_img)\n', (8999, 9008), False, 'import core\n'), ((9143, 9175), 'cv2.imwrite', 'cv2.imwrite', (['morph_file', 'dst_img'], {}), '(morph_file, dst_img)\n', (9154, 9175), False, 'import cv2\n'), ((9222, 9247), 'core.face_points', 'core.face_points', (['dst_img'], {}), '(dst_img)\n', (9238, 9247), False, 'import core\n'), ((9659, 9712), 'cv2.imwrite', 'cv2.imwrite', (["('images/' + 'tran_src' + '.jpg')", 'src_img'], {}), "('images/' + 'tran_src' + '.jpg', src_img)\n", (9670, 9712), False, 'import cv2\n'), ((9804, 9833), 'cv2.imwrite', 'cv2.imwrite', (['out_img', 'dst_img'], {}), '(out_img, dst_img)\n', (9815, 9833), False, 'import cv2\n'), ((2269, 2332), 'core.matrix_rectangle', 'core.matrix_rectangle', (['(0)', '(0)', 'src_img.shape[1]', 'src_img.shape[0]'], {}), '(0, 0, src_img.shape[1], src_img.shape[0])\n', (2290, 2332), False, 'import core\n'), ((2476, 2539), 'core.matrix_rectangle', 'core.matrix_rectangle', (['(0)', '(0)', 'src_img.shape[1]', 'src_img.shape[0]'], {}), '(0, 0, src_img.shape[1], src_img.shape[0])\n', (2497, 2539), False, 'import core\n'), ((2718, 2738), 'numpy.array', 'np.array', (['jaw_points'], {}), '(jaw_points)\n', (2726, 2738), True, 'import numpy as np\n'), ((4242, 4282), 'numpy.float32', 'np.float32', (['[dst_points[:core.FACE_END]]'], {}), '([dst_points[:core.FACE_END]])\n', (4252, 4282), True, 'import numpy as np\n'), ((4379, 4427), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '(0)', 'mat_multiple'], {}), '(center, 0, 
mat_multiple)\n', (4402, 4427), False, 'import cv2\n'), ((4448, 4520), 'cv2.warpAffine', 'cv2.warpAffine', (['face_mask', 'mat', '(face_mask.shape[1], face_mask.shape[0])'], {}), '(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))\n', (4462, 4520), False, 'import cv2\n'), ((4557, 4592), 'cv2.blur', 'cv2.blur', (['face_mask', 'k_size', 'center'], {}), '(face_mask, k_size, center)\n', (4565, 4592), False, 'import cv2\n'), ((4623, 4640), 'numpy.uint8', 'np.uint8', (['dst_img'], {}), '(dst_img)\n', (4631, 4640), True, 'import numpy as np\n'), ((5860, 5927), 'core.morph_triangle', 'core.morph_triangle', (['src_img', 'dst_img', 'res_img', 't1', 't2', 't', 'alpha', 'i'], {}), '(src_img, dst_img, res_img, t1, t2, t, alpha, i)\n', (5879, 5927), False, 'import core\n'), ((6556, 6593), 'cv2.imread', 'cv2.imread', (['src_img', 'cv2.IMREAD_COLOR'], {}), '(src_img, cv2.IMREAD_COLOR)\n', (6566, 6593), False, 'import cv2\n'), ((6612, 6649), 'cv2.imread', 'cv2.imread', (['dst_img', 'cv2.IMREAD_COLOR'], {}), '(dst_img, cv2.IMREAD_COLOR)\n', (6622, 6649), False, 'import cv2\n'), ((8516, 8553), 'cv2.imread', 'cv2.imread', (['src_img', 'cv2.IMREAD_COLOR'], {}), '(src_img, cv2.IMREAD_COLOR)\n', (8526, 8553), False, 'import cv2\n'), ((8622, 8659), 'cv2.imread', 'cv2.imread', (['dst_img', 'cv2.IMREAD_COLOR'], {}), '(dst_img, cv2.IMREAD_COLOR)\n', (8632, 8659), False, 'import cv2\n'), ((724, 775), 'numpy.hstack', 'np.hstack', (['(s2 / s1 * r, c2.T - s2 / s1 * r * c1.T)'], {}), '((s2 / s1 * r, c2.T - s2 / s1 * r * c1.T))\n', (733, 775), True, 'import numpy as np\n'), ((781, 807), 'numpy.matrix', 'np.matrix', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (790, 807), True, 'import numpy as np\n'), ((2172, 2249), 'core.matrix_rectangle', 'core.matrix_rectangle', (['face_area[0]', 'face_area[1]', 'face_area[2]', 'face_area[3]'], {}), '(face_area[0], face_area[1], face_area[2], face_area[3])\n', (2193, 2249), False, 'import core\n'), ((2379, 2456), 'core.matrix_rectangle', 
'core.matrix_rectangle', (['face_area[0]', 'face_area[1]', 'face_area[2]', 'face_area[3]'], {}), '(face_area[0], face_area[1], face_area[2], face_area[3])\n', (2400, 2456), False, 'import core\n'), ((3860, 3912), 'core.affine_triangle', 'core.affine_triangle', (['src_img', 'res_img', 't_src', 't_dst'], {}), '(src_img, res_img, t_src, t_dst)\n', (3880, 3912), False, 'import core\n'), ((4164, 4197), 'cv2.convexHull', 'cv2.convexHull', (['dst_matrix[group]'], {}), '(dst_matrix[group])\n', (4178, 4197), False, 'import cv2\n'), ((1481, 1528), 'numpy.mean', 'np.mean', (['landmark[core.LEFT_EYE_POINTS]'], {'axis': '(0)'}), '(landmark[core.LEFT_EYE_POINTS], axis=0)\n', (1488, 1528), True, 'import numpy as np\n'), ((1539, 1587), 'numpy.mean', 'np.mean', (['landmark[core.RIGHT_EYE_POINTS]'], {'axis': '(0)'}), '(landmark[core.RIGHT_EYE_POINTS], axis=0)\n', (1546, 1587), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for Cell related computation."""
# pylint: disable=missing-docstring
import numpy as np
from mindspore import ParameterTuple
from mindspore import nn, context
from mindspore.common.api import _executor, ms_function
from mindspore.common.tensor import Tensor
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
from . import keyword
def get_uniform_with_shape(shape):
    """Return float32 samples drawn uniformly from [-0.1, 0.1) with `shape`.

    The global RNG is reseeded with a fixed seed on every call, so repeated
    calls produce identical (reproducible) values.
    """
    np.random.seed(1)
    samples = np.random.uniform(-0.1, 0.1, size=shape)
    return samples.astype(np.float32)
def set_block_param_with_rand(net, rand_func=None):
    """Reinitialize every trainable parameter of `net` via `rand_func`.

    `rand_func` receives the parameter's shape and must return array data.
    Does nothing when `net` is not a Cell or no generator is supplied.
    """
    if rand_func is None or not isinstance(net, nn.Cell):
        return
    net.init_parameters_data()
    for param in net.trainable_params():
        shape = param.default_input.asnumpy().shape
        param.default_input = Tensor(rand_func(shape))
def compile_block(net, *inputs, rand_func=None, training=True):
    """Compile `net` against `inputs` and return the `_executor` result,
    after setting train mode and optionally randomizing its parameters."""
    set_block_training(net, training)
    set_block_param_with_rand(net, rand_func)
    return _executor.compile(net, *inputs)
def run_block(net, *inputs, rand_func=None, training=True):
    """Execute `net` on `inputs` and return its outputs.

    Sets train mode and optionally randomizes parameters first. In PyNative
    mode the call is wrapped in an `ms_function` so it runs through the
    compiled-graph path instead of eager op-by-op execution.
    """
    set_block_training(net, training)
    set_block_param_with_rand(net, rand_func)
    if context.get_context("mode") == context.PYNATIVE_MODE:
        def func_pynative(*inputs):
            @ms_function
            def _func_pynative(*inputs):
                return net(*inputs)
            return _func_pynative(*inputs)
        return func_pynative(*inputs)
    return net(*inputs)
class IthOutputCell(nn.Cell):
    """Wrapper Cell returning only output `output_index` of `network`."""
    def __init__(self, network, output_index):
        # Keep the wrapped Cell's parameter names unprefixed when possible.
        if isinstance(network, nn.Cell):
            super(IthOutputCell, self).__init__(auto_prefix=False)
        else:
            super(IthOutputCell, self).__init__()
        self.network = network
        self.output_index = output_index
    def construct(self, *inputs):
        return self.network(*inputs)[self.output_index]
def get_output_cell(network, num_input, output_index, training=True):
    """Wrap `network` so only output `output_index` is returned.

    `num_input` is accepted for signature compatibility but unused.
    """
    _ = num_input
    cell = IthOutputCell(network, output_index)
    set_block_training(cell, training)
    return cell
class OutputReduceSumCell(nn.Cell):
    """Wrapper Cell that reduce-sums each of `network`'s outputs to a scalar.

    With one output the scalar is returned directly; otherwise a tuple of
    per-output scalar sums is built.
    """
    def __init__(self, network, output_num):
        super(OutputReduceSumCell, self).__init__()
        self.output_num = output_num
        self.network = network
        self.reduce_sum = P.ReduceSum()
    def construct(self, *inputs):
        if self.output_num == 1:
            # `None` axis reduces over all dimensions.
            return self.reduce_sum(self.network(*inputs), None)
        # Accumulate the per-output sums into a graph-mode tuple.
        ret = F.make_tuple()
        for index in range(self.output_num):
            predict = self.network(*inputs)[index]
            predict_reduce = self.reduce_sum(predict, None)
            ret = ret + F.make_tuple(predict_reduce)
        return ret
def get_output_reduce_cell(network, output_num, training=True):
    """Wrap `network` so each of its `output_num` outputs is reduce-summed."""
    cell = OutputReduceSumCell(network, output_num)
    set_block_training(cell, training)
    return cell
class InputOpNet(nn.Cell):
    """Adapter Cell calling primitive `op` with up to four captured constants.

    `gen_net` rebinds one of the `constructN_cM` variants below as
    `construct`, choosing it by the number of runtime inputs (N) and
    captured constants (M). The `_fake` variants add a dummy data operand,
    and `constructc1_1` passes the constant before the runtime input.
    """
    def __init__(self, op, c1=None, c2=None, c3=None, c4=None):
        super(InputOpNet, self).__init__()
        self.op = op
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.c4 = c4
    def construct(self, *inputs):
        # Placeholder: a concrete variant is bound via setattr in gen_net.
        raise NotImplementedError
    def construct0_c0_fake(self, data):
        return self.op() + data
    def construct0_c1_fake(self, data):
        return self.op(self.c1) + data
    def construct0_c2_fake(self, data):
        return self.op(self.c1, self.c2) + data
    def construct0_c3_fake(self, data):
        return self.op(self.c1, self.c2, self.c3) + data
    def construct0_c0(self):
        return self.op()
    def construct0_c1(self):
        return self.op(self.c1)
    def construct0_c2(self):
        return self.op(self.c1, self.c2)
    def construct1_c0(self, x1):
        return self.op(x1)
    def construct1_c1(self, x1):
        return self.op(x1, self.c1)
    def construct1_c2(self, x1):
        return self.op(x1, self.c1, self.c2)
    def construct1_c3(self, x1):
        return self.op(x1, self.c1, self.c2, self.c3)
    def construct1_c4(self, x1):
        return self.op(x1, self.c1, self.c2, self.c3, self.c4)
    def constructc1_1(self, x1):
        return self.op(self.c1, x1)
    def construct2_c0(self, x1, x2):
        return self.op(x1, x2)
    def construct2_c1(self, x1, x2):
        return self.op(x1, x2, self.c1)
    def construct2_c3(self, x1, x2):
        return self.op(x1, x2, self.c1, self.c2, self.c3)
    def construct3_c0(self, x1, x2, x3):
        return self.op(x1, x2, x3)
    def construct3_c1(self, x1, x2, x3):
        return self.op(x1, x2, x3, self.c1)
    def construct4_c0(self, x1, x2, x3, x4):
        return self.op(x1, x2, x3, x4)
    def construct4_c1(self, x1, x2, x3, x4):
        return self.op(x1, x2, x3, x4, self.c1)
    def construct4_c2(self, x1, x2, x3, x4):
        return self.op(x1, x2, x3, x4, self.c1, self.c2)
    def construct4_c4(self, x1, x2, x3, x4):
        return self.op(x1, x2, x3, x4, self.c1, self.c2, self.c3, self.c4)
    def construct5_c0(self, x1, x2, x3, x4, x5):
        return self.op(x1, x2, x3, x4, x5)
    def construct6_c0(self, x1, x2, x3, x4, x5, x6):
        return self.op(x1, x2, x3, x4, x5, x6)
    def construct5_c1(self, x1, x2, x3, x4, x5):
        return self.op(x1, x2, x3, x4, x5, self.c1)
    def construct5_c4(self, x1, x2, x3, x4, x5):
        return self.op(x1, x2, x3, x4, x5, self.c1, self.c2, self.c3, self.c4)
def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False):
    """Build an InputOpNet whose `construct` matches the requested layout.

    Cells pass straight through. For a primitive `op`, the variant is picked
    by `input_num` runtime inputs and `len(desc_const)` constants; set
    `const_first` to pass constants before inputs and `add_fake_input` to
    use the dummy-data variant.
    """
    if isinstance(op, nn.Cell):
        return op
    net = InputOpNet(op, *desc_const)
    fn_name = ('constructc%d_%d' % (len(desc_const), input_num)
               if const_first
               else 'construct%d_c%d' % (input_num, len(desc_const)))
    if add_fake_input:
        fn_name += '_fake'
    # Rebind the chosen variant as the Cell's construct method.
    setattr(net, "construct", getattr(net, fn_name))
    set_block_training(net, training)
    return net
class OperationBackward(nn.Cell):
    """Backward wrapper applying `grad_op` to `network` with fixed sens."""
    def __init__(self, network, grad_op, sens):
        # Keep the wrapped Cell's parameter names unprefixed when possible.
        if isinstance(network, nn.Cell):
            super(OperationBackward, self).__init__(auto_prefix=False)
        else:
            super(OperationBackward, self).__init__()
        self.network = network
        self.grad = grad_op
        self.sens = sens
    def construct(self, *inputs):
        grad_fn = self.grad(self.network)
        return grad_fn(*inputs, self.sens)
class OperationBackwardWithNoSens(nn.Cell):
    """Backward wrapper applying `grad_op` to `network` without sens input."""
    def __init__(self, network, grad_op):
        # Keep the wrapped Cell's parameter names unprefixed when possible.
        if isinstance(network, nn.Cell):
            super(OperationBackwardWithNoSens, self).__init__(auto_prefix=False)
        else:
            super(OperationBackwardWithNoSens, self).__init__()
        self.network = network
        self.grad = grad_op
    def construct(self, *inputs):
        grad_fn = self.grad(self.network)
        return grad_fn(*inputs)
class NNBackward(nn.Cell):
    """Backward wrapper differentiating w.r.t. trainable params, fixed sens."""
    def __init__(self, network, grad_op, sens):
        # Keep the wrapped Cell's parameter names unprefixed when possible.
        if isinstance(network, nn.Cell):
            super(NNBackward, self).__init__(auto_prefix=False)
        else:
            super(NNBackward, self).__init__()
        self.network = network
        self.grad = grad_op
        self.sens = sens
        self.params = ParameterTuple(network.trainable_params())
    def construct(self, *inputs):
        grad_fn = self.grad(self.network, self.params)
        return grad_fn(*inputs, self.sens)
class NNBackwardWithNoSens(nn.Cell):
    """Backward wrapper differentiating w.r.t. trainable params, no sens."""
    def __init__(self, network, grad_op):
        # Keep the wrapped Cell's parameter names unprefixed when possible.
        if isinstance(network, nn.Cell):
            super(NNBackwardWithNoSens, self).__init__(auto_prefix=False)
        else:
            super(NNBackwardWithNoSens, self).__init__()
        self.network = network
        self.grad = grad_op
        self.params = ParameterTuple(network.trainable_params())
    def construct(self, *inputs):
        grad_fn = self.grad(self.network, self.params)
        return grad_fn(*inputs)
def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(),
                 const_first=False, add_fake_input=False):
    """Build a backward cell around `net` for the given grad operation.

    Non-cell callables are first wrapped via gen_net.  The wrapper class is
    picked from the grad_op flags: get_by_list selects the parameter-gradient
    variants, sens_param selects the fixed-sensitivity variants.
    """
    if not isinstance(net, nn.Cell):
        net = gen_net(net, input_num, desc_const=desc_const, const_first=const_first,
                      add_fake_input=add_fake_input)
    if grad_op.get_by_list:
        wrapper = NNBackward(net, grad_op, sens) if grad_op.sens_param \
            else NNBackwardWithNoSens(net, grad_op)
    else:
        wrapper = OperationBackward(net, grad_op, sens) if grad_op.sens_param \
            else OperationBackwardWithNoSens(net, grad_op)
    set_block_training(wrapper, training)
    return wrapper
def set_block_training(net, training=True):
    """Switch a cell into train/eval mode; non-cell objects are left untouched."""
    if not isinstance(net, nn.Cell):
        return
    net.set_train(training)
def set_block_phase(net, phase='train'):
    """Set a cell's execution phase; non-cell objects are left untouched."""
    if not isinstance(net, nn.Cell):
        return
    net.phase = phase
def create_funcs(verification_set, block_generator, block_runner, grad_op=None, default_rand_func=None):
    """Replace each block in the verification set with a runnable closure.

    For every config entry, builds a `function(*inputs)` closure that wraps the
    block via `block_generator` and executes it via `block_runner`.  When
    `grad_op` is given the closure computes gradients (splitting multi-output
    blocks if requested); otherwise it runs the plain forward pass.
    Returns the (mutated) list of configs.
    """
    def create_func(block, num_outputs, rand_func, desc_const, const_first, add_fake_input, split_outputs):
        def function(*inputs):
            # gradient
            if grad_op:
                # No declared outputs: gradients taken without a sensitivity
                # argument, so rebuild the grad op with sens_param=False.
                if num_outputs == 0:
                    grad_op_ = GradOperation(get_all=grad_op.get_all,
                                              get_by_list=grad_op.get_by_list, sens_param=False)
                    b = block_generator(block, grad_op_, len(inputs), desc_const=desc_const,
                                        const_first=const_first, add_fake_input=add_fake_input)
                    return block_runner(b, *inputs, rand_func=rand_func)
                # Single output: the last element of `inputs` is the sensitivity.
                if num_outputs == 1:
                    b = block_generator(block, grad_op, len(inputs) - 1, inputs[-1], desc_const=desc_const,
                                        const_first=const_first, add_fake_input=add_fake_input)
                    return block_runner(b, *(inputs[:-1]), rand_func=rand_func)
                # Multiple outputs: the trailing num_outputs entries of `inputs`
                # are per-output sensitivities.
                if split_outputs:
                    # Differentiate each output separately through a
                    # single-output projection cell and concatenate the grads.
                    block_inputs = inputs[0:len(inputs) - num_outputs]
                    sens_inputs = inputs[len(inputs) - num_outputs:]
                    ret = []
                    for i in range(num_outputs):
                        bi_inputs = list(block_inputs)
                        bi = get_output_cell(block, len(block_inputs), i)
                        bi = block_generator(bi, grad_op, len(bi_inputs), sens_inputs[i], desc_const=desc_const,
                                             const_first=const_first, add_fake_input=add_fake_input)
                        grads_i = block_runner(bi, *bi_inputs, rand_func=rand_func)
                        if isinstance(grads_i, tuple):
                            ret.extend(grads_i)
                        else:
                            ret.append(grads_i)
                    return ret
                # All sensitivities passed at once as a tuple.
                block_inputs = inputs[0:len(inputs) - num_outputs]
                sens_inputs = tuple(inputs[len(inputs) - num_outputs:])
                b = block_generator(block, grad_op, len(block_inputs), sens_inputs, desc_const=desc_const,
                                    const_first=const_first, add_fake_input=add_fake_input)
                return block_runner(b, *block_inputs, rand_func=rand_func)
            # forward
            inputs_num = len(inputs)
            if add_fake_input and inputs_num == 1:
                # input is faked
                inputs_num = 0
            b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first,
                                add_fake_input=add_fake_input)
            return block_runner(b, *inputs, rand_func=rand_func)
        return function
    bc_configs = verification_set[keyword.function]
    for config in bc_configs:
        block = config[keyword.block]
        rand_func = config.get(keyword.init_param_with, default_rand_func)
        num_outputs = config.get(keyword.num_outputs, 0)
        desc_const = config.get(keyword.desc_const, [])
        const_first = config.get(keyword.const_first, False)
        add_fake_input = config.get(keyword.add_fake_input, False)
        split_outputs = config.get(keyword.split_outputs, True)
        config[keyword.block] = create_func(block, num_outputs, rand_func, desc_const,
                                            const_first, add_fake_input, split_outputs)
    return bc_configs
| [
"mindspore.context.get_context",
"mindspore.ops.operations.ReduceSum",
"mindspore.ops.functional.make_tuple",
"mindspore.ops.composite.GradOperation",
"numpy.random.seed",
"mindspore.common.api._executor.compile",
"numpy.random.uniform"
] | [((1135, 1152), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1149, 1152), True, 'import numpy as np\n'), ((1669, 1700), 'mindspore.common.api._executor.compile', '_executor.compile', (['net', '*inputs'], {}), '(net, *inputs)\n', (1686, 1700), False, 'from mindspore.common.api import _executor, ms_function\n'), ((1854, 1881), 'mindspore.context.get_context', 'context.get_context', (['"""mode"""'], {}), "('mode')\n", (1873, 1881), False, 'from mindspore import nn, context\n'), ((3012, 3025), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (3023, 3025), True, 'from mindspore.ops import operations as P\n'), ((3172, 3186), 'mindspore.ops.functional.make_tuple', 'F.make_tuple', ([], {}), '()\n', (3184, 3186), True, 'from mindspore.ops import functional as F\n'), ((1164, 1204), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'shape'}), '(-0.1, 0.1, size=shape)\n', (1181, 1204), True, 'import numpy as np\n'), ((3367, 3395), 'mindspore.ops.functional.make_tuple', 'F.make_tuple', (['predict_reduce'], {}), '(predict_reduce)\n', (3379, 3395), True, 'from mindspore.ops import functional as F\n'), ((10134, 10227), 'mindspore.ops.composite.GradOperation', 'GradOperation', ([], {'get_all': 'grad_op.get_all', 'get_by_list': 'grad_op.get_by_list', 'sens_param': '(False)'}), '(get_all=grad_op.get_all, get_by_list=grad_op.get_by_list,\n sens_param=False)\n', (10147, 10227), False, 'from mindspore.ops.composite import GradOperation\n')] |
from deep_tobit.util import to_numpy, to_torch
import torch as t
from scipy.stats import norm
import numpy as np
from deep_tobit.util import normalize
class __CDF(t.autograd.Function):
    """Standard-normal CDF as a custom autograd function.

    Forward evaluates Phi(x) via scipy; backward multiplies the incoming
    gradient by the saved pdf phi(x), since d/dx Phi(x) = phi(x).
    """
    @staticmethod
    def forward(ctx, x: t.Tensor) -> t.Tensor:
        dtype, device = x.dtype, x.device
        x_np = to_numpy(x)
        # Save the pdf now; it is the local derivative needed in backward.
        pdf = to_torch(norm.pdf(x_np), type = dtype, device = device, grad = False)
        ctx.save_for_backward(pdf)
        return to_torch(norm.cdf(x_np), type = dtype, device = device, grad = False)
    @staticmethod
    def backward(ctx, grad_output):
        pdf, = ctx.saved_tensors
        # Chain rule: upstream gradient times phi(x); None when no grad needed.
        return grad_output * pdf if ctx.needs_input_grad[0] else None
cdf = __CDF.apply
if __name__ == '__main__':
    # Sanity check: compare a hand-derived gradient of log Phi(z) against the
    # gradient produced by the custom autograd cdf above.
    input = [10, 15, 20, 25, 30]
    # manual gradient computing
    x = np.array(input)
    mean, std = x.mean(), x.std()
    x_normalized = normalize(x, mean, std)
    expected_cdf = norm.cdf(x_normalized)
    expected_log_likelihood = np.log(expected_cdf)
    # d/dx log Phi((x - mean)/std) = phi(z) / (Phi(z) * std)
    expected_grad_log_likelihood_by_x = norm.pdf(x_normalized) / (expected_cdf * std)
    # automatic gradient computing
    x = to_torch(input, grad=True)
    # in this test mean & std are considered constants
    x_normalized = normalize(x, mean, std)
    cdf_result = cdf(x_normalized)
    log_likelihood_result = t.log(cdf_result)
    # Sum so backward() produces per-element d(log Phi)/dx in x.grad.
    loss = t.sum(log_likelihood_result)
    loss.backward()
print(x.grad, expected_grad_log_likelihood_by_x) | [
"torch.log",
"numpy.log",
"deep_tobit.util.to_torch",
"deep_tobit.util.normalize",
"numpy.array",
"torch.sum",
"scipy.stats.norm.pdf",
"deep_tobit.util.to_numpy",
"scipy.stats.norm.cdf"
] | [((840, 855), 'numpy.array', 'np.array', (['input'], {}), '(input)\n', (848, 855), True, 'import numpy as np\n'), ((909, 932), 'deep_tobit.util.normalize', 'normalize', (['x', 'mean', 'std'], {}), '(x, mean, std)\n', (918, 932), False, 'from deep_tobit.util import normalize\n'), ((952, 974), 'scipy.stats.norm.cdf', 'norm.cdf', (['x_normalized'], {}), '(x_normalized)\n', (960, 974), False, 'from scipy.stats import norm\n'), ((1005, 1025), 'numpy.log', 'np.log', (['expected_cdf'], {}), '(expected_cdf)\n', (1011, 1025), True, 'import numpy as np\n'), ((1156, 1182), 'deep_tobit.util.to_torch', 'to_torch', (['input'], {'grad': '(True)'}), '(input, grad=True)\n', (1164, 1182), False, 'from deep_tobit.util import to_numpy, to_torch\n'), ((1257, 1280), 'deep_tobit.util.normalize', 'normalize', (['x', 'mean', 'std'], {}), '(x, mean, std)\n', (1266, 1280), False, 'from deep_tobit.util import normalize\n'), ((1345, 1362), 'torch.log', 't.log', (['cdf_result'], {}), '(cdf_result)\n', (1350, 1362), True, 'import torch as t\n'), ((1375, 1403), 'torch.sum', 't.sum', (['log_likelihood_result'], {}), '(log_likelihood_result)\n', (1380, 1403), True, 'import torch as t\n'), ((306, 317), 'deep_tobit.util.to_numpy', 'to_numpy', (['x'], {}), '(x)\n', (314, 317), False, 'from deep_tobit.util import to_numpy, to_torch\n'), ((1066, 1088), 'scipy.stats.norm.pdf', 'norm.pdf', (['x_normalized'], {}), '(x_normalized)\n', (1074, 1088), False, 'from scipy.stats import norm\n'), ((341, 353), 'scipy.stats.norm.pdf', 'norm.pdf', (['_x'], {}), '(_x)\n', (349, 353), False, 'from scipy.stats import norm\n'), ((458, 470), 'scipy.stats.norm.cdf', 'norm.cdf', (['_x'], {}), '(_x)\n', (466, 470), False, 'from scipy.stats import norm\n')] |
import pytest
from lazydiff import ops
from lazydiff.vars import Var
import numpy as np
def test_sin():
    # sin at pi: value 0, derivative cos(pi) = -1 per component.
    v = Var([np.pi, np.pi])
    out = ops.sin(v)
    out.backward()
    assert out.val == pytest.approx([0, 0])
    assert np.all(out.grad(v) == np.array([-1, -1]))
def test_cos():
    # cos at pi: value -1, derivative -sin(pi) = 0.
    v = Var([np.pi, np.pi])
    out = ops.cos(v)
    out.backward()
    assert out.val == pytest.approx([-1, -1])
    assert np.array(out.grad(v)) == pytest.approx([0, 0])
def test_tan():
    # tan at 0: value 0, derivative sec^2(0) = 1.
    v = Var([0, 0])
    out = ops.tan(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_asin():
    # arcsin at 0: value 0, derivative 1/sqrt(1 - 0) = 1.
    v = Var([0, 0])
    out = ops.arcsin(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_acos():
    # arccos at 0: value pi/2, derivative -1/sqrt(1 - 0) = -1.
    v = Var([0, 0])
    out = ops.arccos(v)
    out.backward()
    assert np.all(out.val == np.arccos([0, 0]))
    assert np.all(out.grad(v) == [-1, -1])
def test_atan():
    # arctan at 0: value 0, derivative 1/(1 + 0^2) = 1.
    v = Var([0, 0])
    out = ops.arctan(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_sinh():
    # sinh at 0: value 0, derivative cosh(0) = 1.
    v = Var([0, 0])
    out = ops.sinh(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_cosh():
    # cosh at 0: value 1, derivative sinh(0) = 0.
    v = Var([0, 0])
    out = ops.cosh(v)
    out.backward()
    assert np.all(out.val == [1, 1])
    assert np.all(out.grad(v) == [0, 0])
def test_tanh():
    # tanh at 0: value 0, derivative 1 - tanh(0)^2 = 1.
    v = Var([0, 0])
    out = ops.tanh(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_asinh():
    # arcsinh at 0: value 0, derivative 1/sqrt(0^2 + 1) = 1.
    v = Var([0, 0])
    out = ops.arcsinh(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_acosh():
    # arccosh at 2: derivative 1/sqrt(2^2 - 1) = 1/sqrt(3).
    v = Var([2, 2])
    out = ops.arccosh(v)
    out.backward()
    assert np.all(out.val == np.arccosh([2, 2]))
    assert np.all(out.grad(v) == np.array([1, 1]) / np.sqrt(3))
def test_atanh():
    # arctanh at 0: value 0, derivative 1/(1 - 0^2) = 1.
    v = Var([0, 0])
    out = ops.arctanh(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_exp():
    # exp at 0: value 1, derivative exp(0) = 1.
    v = Var([0, 0])
    out = ops.exp(v)
    out.backward()
    assert np.all(out.val == [1, 1])
    assert np.all(out.grad(v) == [1, 1])
def test_log():
    # log at 1: value 0, derivative 1/1 = 1.
    v = Var([1., 1.])
    out = ops.log(v)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(v) == [1, 1])
def test_logistic():
    # sigmoid at 0: value 0.5, derivative sigma * (1 - sigma).
    v = Var([0, 0])
    out = ops.logistic(v)
    out.backward()
    assert np.all(out.val == [.5, .5])
    assert np.all(out.grad(v) == (out.val * (1 - out.val)))
def test_sqrt():
    # sqrt at 4: value 2, derivative 1/(2*sqrt(4)).
    v = Var([4, 4])
    out = ops.sqrt(v)
    out.backward()
    assert np.all(out.val == [2, 2])
    assert np.all(out.grad(v) == [.5 * 1 / out.val, .5 * 1 / out.val])
def test_neg():
    # Negation: value flipped, derivative -1.
    v = Var([1, 1])
    out = ops.neg(v)
    out.backward()
    assert np.all(out.val == [-1, -1])
    assert np.all(out.grad(v) == [-1, -1])
def test_add():
    # a + b: derivative 1 w.r.t. each operand.
    a = Var([1, 1])
    b = Var([1, 1])
    out = ops.add(a, b)
    out.backward()
    assert np.all(out.val == [2, 2])
    assert np.all(out.grad(a) == [1, 1])
    assert np.all(out.grad(b) == [1, 1])
def test_sub():
    # a - b: derivative 1 w.r.t. a and -1 w.r.t. b.
    a = Var([1, 1])
    b = Var([1, 1])
    out = ops.sub(a, b)
    out.backward()
    assert np.all(out.val == [0, 0])
    assert np.all(out.grad(a) == [1, 1])
    assert np.all(out.grad(b) == [-1, -1])
def test_mul():
    # a * b: derivative is the other operand's value.
    a = Var([1, 1])
    b = Var([1, 1])
    out = ops.mul(a, b)
    out.backward()
    assert np.all(out.val == [1, 1])
    assert np.all(out.grad(a) == b.val)
    assert np.all(out.grad(b) == a.val)
def test_div():
    # a / b at (1, 1): d/da = 1/b = 1, d/db = -a/b^2 = -1.
    a = Var([1, 1])
    b = Var([1, 1])
    out = ops.div(a, b)
    out.backward()
    assert np.all(out.val == [1, 1])
    assert np.all(out.grad(a) == [1, 1])
    assert np.all(out.grad(b) == [-1, -1])
def test_pow():
    # a ** b at (1, 1): d/da = b*a^(b-1) = 1, d/db = a^b * ln(a) = 0.
    a = Var([1, 1])
    b = Var([1, 1])
    out = ops.pow(a, b)
    out.backward()
    assert np.all(out.val == [1, 1])
    assert np.all(out.grad(a) == [1, 1])
    assert np.all(out.grad(b) == [0, 0])
def test_abs():
    # |x| at -1: value 1, derivative sign(-1) = -1.
    v = Var([-1, -1])
    out = ops.abs(v)
    out.backward()
    assert np.all(out.val == [1, 1])
    assert np.all(out.grad(v) == [-1, -1])
def test_sum():
    # Sum reduction: derivative 1 for every element.
    v = Var([2, 2, 2, 2, 2])
    out = ops.sum(v)
    out.backward()
    assert out.val == 10.
    assert np.all(out.grad(v) == [1, 1, 1, 1, 1])
def test_norm():
    # L2 norm of (1, 2, 3): gradient x / ||x|| = [1, 2, 3] / sqrt(14).
    v = Var([1, 2, 3])
    out = ops.norm(v, p=2)
    out.backward()
    assert out.val == np.linalg.norm(v.val)
    assert np.all(out.grad(v) == [1 / np.sqrt(14), np.sqrt(2 / 7), 3 / np.sqrt(14)])
def test_composite_logexp():
    # log(exp(x)) is the identity, with unit derivative everywhere.
    x = Var([5, 10, 15, 20])
    exped = ops.exp(x)
    y = ops.log(exped)
    y.backward()
    assert np.all(x == y)
    assert np.all(y.grad(x) == 1)
def test_composite_trig():
    # sin(x)/cos(x) must agree with tan(x) in both value and derivative.
    x = Var([5, 10, 15, 20])
    ratio = ops.sin(x) / ops.cos(x)
    tangent = ops.tan(x)
    x.forward()
    assert np.all(ratio.val == pytest.approx(tangent.val))
    assert np.all(ratio.grad(x) == pytest.approx(tangent.grad(x)))
| [
"numpy.arccos",
"numpy.sqrt",
"lazydiff.ops.sqrt",
"lazydiff.ops.tanh",
"numpy.array",
"numpy.linalg.norm",
"lazydiff.ops.div",
"lazydiff.ops.sum",
"lazydiff.ops.arctanh",
"lazydiff.ops.norm",
"lazydiff.ops.arcsin",
"lazydiff.ops.abs",
"numpy.arccosh",
"lazydiff.ops.exp",
"lazydiff.ops.s... | [((116, 135), 'lazydiff.vars.Var', 'Var', (['[np.pi, np.pi]'], {}), '([np.pi, np.pi])\n', (119, 135), False, 'from lazydiff.vars import Var\n'), ((147, 160), 'lazydiff.ops.sin', 'ops.sin', (['var1'], {}), '(var1)\n', (154, 160), False, 'from lazydiff import ops\n'), ((311, 330), 'lazydiff.vars.Var', 'Var', (['[np.pi, np.pi]'], {}), '([np.pi, np.pi])\n', (314, 330), False, 'from lazydiff.vars import Var\n'), ((342, 355), 'lazydiff.ops.cos', 'ops.cos', (['var1'], {}), '(var1)\n', (349, 355), False, 'from lazydiff import ops\n'), ((513, 524), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (516, 524), False, 'from lazydiff.vars import Var\n'), ((536, 549), 'lazydiff.ops.tan', 'ops.tan', (['var1'], {}), '(var1)\n', (543, 549), False, 'from lazydiff import ops\n'), ((581, 607), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (587, 607), True, 'import numpy as np\n'), ((682, 693), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (685, 693), False, 'from lazydiff.vars import Var\n'), ((705, 721), 'lazydiff.ops.arcsin', 'ops.arcsin', (['var1'], {}), '(var1)\n', (715, 721), False, 'from lazydiff import ops\n'), ((753, 779), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (759, 779), True, 'import numpy as np\n'), ((854, 865), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (857, 865), False, 'from lazydiff.vars import Var\n'), ((877, 893), 'lazydiff.ops.arccos', 'ops.arccos', (['var1'], {}), '(var1)\n', (887, 893), False, 'from lazydiff import ops\n'), ((1039, 1050), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (1042, 1050), False, 'from lazydiff.vars import Var\n'), ((1062, 1078), 'lazydiff.ops.arctan', 'ops.arctan', (['var1'], {}), '(var1)\n', (1072, 1078), False, 'from lazydiff import ops\n'), ((1110, 1136), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (1116, 1136), True, 'import numpy as np\n'), 
((1211, 1222), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (1214, 1222), False, 'from lazydiff.vars import Var\n'), ((1234, 1248), 'lazydiff.ops.sinh', 'ops.sinh', (['var1'], {}), '(var1)\n', (1242, 1248), False, 'from lazydiff import ops\n'), ((1280, 1306), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (1286, 1306), True, 'import numpy as np\n'), ((1381, 1392), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (1384, 1392), False, 'from lazydiff.vars import Var\n'), ((1404, 1418), 'lazydiff.ops.cosh', 'ops.cosh', (['var1'], {}), '(var1)\n', (1412, 1418), False, 'from lazydiff import ops\n'), ((1450, 1476), 'numpy.all', 'np.all', (['(var2.val == [1, 1])'], {}), '(var2.val == [1, 1])\n', (1456, 1476), True, 'import numpy as np\n'), ((1551, 1562), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (1554, 1562), False, 'from lazydiff.vars import Var\n'), ((1574, 1588), 'lazydiff.ops.tanh', 'ops.tanh', (['var1'], {}), '(var1)\n', (1582, 1588), False, 'from lazydiff import ops\n'), ((1620, 1646), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (1626, 1646), True, 'import numpy as np\n'), ((1722, 1733), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (1725, 1733), False, 'from lazydiff.vars import Var\n'), ((1745, 1762), 'lazydiff.ops.arcsinh', 'ops.arcsinh', (['var1'], {}), '(var1)\n', (1756, 1762), False, 'from lazydiff import ops\n'), ((1794, 1820), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (1800, 1820), True, 'import numpy as np\n'), ((1896, 1907), 'lazydiff.vars.Var', 'Var', (['[2, 2]'], {}), '([2, 2])\n', (1899, 1907), False, 'from lazydiff.vars import Var\n'), ((1919, 1936), 'lazydiff.ops.arccosh', 'ops.arccosh', (['var1'], {}), '(var1)\n', (1930, 1936), False, 'from lazydiff import ops\n'), ((2105, 2116), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (2108, 2116), False, 'from lazydiff.vars import 
Var\n'), ((2128, 2145), 'lazydiff.ops.arctanh', 'ops.arctanh', (['var1'], {}), '(var1)\n', (2139, 2145), False, 'from lazydiff import ops\n'), ((2177, 2203), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (2183, 2203), True, 'import numpy as np\n'), ((2277, 2288), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (2280, 2288), False, 'from lazydiff.vars import Var\n'), ((2300, 2313), 'lazydiff.ops.exp', 'ops.exp', (['var1'], {}), '(var1)\n', (2307, 2313), False, 'from lazydiff import ops\n'), ((2345, 2371), 'numpy.all', 'np.all', (['(var2.val == [1, 1])'], {}), '(var2.val == [1, 1])\n', (2351, 2371), True, 'import numpy as np\n'), ((2445, 2460), 'lazydiff.vars.Var', 'Var', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2448, 2460), False, 'from lazydiff.vars import Var\n'), ((2470, 2483), 'lazydiff.ops.log', 'ops.log', (['var1'], {}), '(var1)\n', (2477, 2483), False, 'from lazydiff import ops\n'), ((2515, 2541), 'numpy.all', 'np.all', (['(var2.val == [0, 0])'], {}), '(var2.val == [0, 0])\n', (2521, 2541), True, 'import numpy as np\n'), ((2620, 2631), 'lazydiff.vars.Var', 'Var', (['[0, 0]'], {}), '([0, 0])\n', (2623, 2631), False, 'from lazydiff.vars import Var\n'), ((2643, 2661), 'lazydiff.ops.logistic', 'ops.logistic', (['var1'], {}), '(var1)\n', (2655, 2661), False, 'from lazydiff import ops\n'), ((2693, 2723), 'numpy.all', 'np.all', (['(var2.val == [0.5, 0.5])'], {}), '(var2.val == [0.5, 0.5])\n', (2699, 2723), True, 'import numpy as np\n'), ((2817, 2828), 'lazydiff.vars.Var', 'Var', (['[4, 4]'], {}), '([4, 4])\n', (2820, 2828), False, 'from lazydiff.vars import Var\n'), ((2840, 2854), 'lazydiff.ops.sqrt', 'ops.sqrt', (['var1'], {}), '(var1)\n', (2848, 2854), False, 'from lazydiff import ops\n'), ((2886, 2912), 'numpy.all', 'np.all', (['(var2.val == [2, 2])'], {}), '(var2.val == [2, 2])\n', (2892, 2912), True, 'import numpy as np\n'), ((3018, 3029), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3021, 3029), 
False, 'from lazydiff.vars import Var\n'), ((3041, 3054), 'lazydiff.ops.neg', 'ops.neg', (['var1'], {}), '(var1)\n', (3048, 3054), False, 'from lazydiff import ops\n'), ((3086, 3114), 'numpy.all', 'np.all', (['(var2.val == [-1, -1])'], {}), '(var2.val == [-1, -1])\n', (3092, 3114), True, 'import numpy as np\n'), ((3190, 3201), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3193, 3201), False, 'from lazydiff.vars import Var\n'), ((3213, 3224), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3216, 3224), False, 'from lazydiff.vars import Var\n'), ((3236, 3255), 'lazydiff.ops.add', 'ops.add', (['var1', 'var2'], {}), '(var1, var2)\n', (3243, 3255), False, 'from lazydiff import ops\n'), ((3287, 3313), 'numpy.all', 'np.all', (['(var3.val == [2, 2])'], {}), '(var3.val == [2, 2])\n', (3293, 3313), True, 'import numpy as np\n'), ((3432, 3443), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3435, 3443), False, 'from lazydiff.vars import Var\n'), ((3455, 3466), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3458, 3466), False, 'from lazydiff.vars import Var\n'), ((3478, 3497), 'lazydiff.ops.sub', 'ops.sub', (['var1', 'var2'], {}), '(var1, var2)\n', (3485, 3497), False, 'from lazydiff import ops\n'), ((3529, 3555), 'numpy.all', 'np.all', (['(var3.val == [0, 0])'], {}), '(var3.val == [0, 0])\n', (3535, 3555), True, 'import numpy as np\n'), ((3676, 3687), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3679, 3687), False, 'from lazydiff.vars import Var\n'), ((3699, 3710), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3702, 3710), False, 'from lazydiff.vars import Var\n'), ((3722, 3741), 'lazydiff.ops.mul', 'ops.mul', (['var1', 'var2'], {}), '(var1, var2)\n', (3729, 3741), False, 'from lazydiff import ops\n'), ((3773, 3799), 'numpy.all', 'np.all', (['(var3.val == [1, 1])'], {}), '(var3.val == [1, 1])\n', (3779, 3799), True, 'import numpy as np\n'), ((3922, 3933), 'lazydiff.vars.Var', 'Var', (['[1, 
1]'], {}), '([1, 1])\n', (3925, 3933), False, 'from lazydiff.vars import Var\n'), ((3945, 3956), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (3948, 3956), False, 'from lazydiff.vars import Var\n'), ((3968, 3987), 'lazydiff.ops.div', 'ops.div', (['var1', 'var2'], {}), '(var1, var2)\n', (3975, 3987), False, 'from lazydiff import ops\n'), ((4019, 4045), 'numpy.all', 'np.all', (['(var3.val == [1, 1])'], {}), '(var3.val == [1, 1])\n', (4025, 4045), True, 'import numpy as np\n'), ((4166, 4177), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (4169, 4177), False, 'from lazydiff.vars import Var\n'), ((4189, 4200), 'lazydiff.vars.Var', 'Var', (['[1, 1]'], {}), '([1, 1])\n', (4192, 4200), False, 'from lazydiff.vars import Var\n'), ((4212, 4231), 'lazydiff.ops.pow', 'ops.pow', (['var1', 'var2'], {}), '(var1, var2)\n', (4219, 4231), False, 'from lazydiff import ops\n'), ((4263, 4289), 'numpy.all', 'np.all', (['(var3.val == [1, 1])'], {}), '(var3.val == [1, 1])\n', (4269, 4289), True, 'import numpy as np\n'), ((4408, 4421), 'lazydiff.vars.Var', 'Var', (['[-1, -1]'], {}), '([-1, -1])\n', (4411, 4421), False, 'from lazydiff.vars import Var\n'), ((4433, 4446), 'lazydiff.ops.abs', 'ops.abs', (['var1'], {}), '(var1)\n', (4440, 4446), False, 'from lazydiff import ops\n'), ((4478, 4504), 'numpy.all', 'np.all', (['(var2.val == [1, 1])'], {}), '(var2.val == [1, 1])\n', (4484, 4504), True, 'import numpy as np\n'), ((4580, 4600), 'lazydiff.vars.Var', 'Var', (['[2, 2, 2, 2, 2]'], {}), '([2, 2, 2, 2, 2])\n', (4583, 4600), False, 'from lazydiff.vars import Var\n'), ((4612, 4625), 'lazydiff.ops.sum', 'ops.sum', (['var1'], {}), '(var1)\n', (4619, 4625), False, 'from lazydiff import ops\n'), ((4756, 4770), 'lazydiff.vars.Var', 'Var', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4759, 4770), False, 'from lazydiff.vars import Var\n'), ((4782, 4801), 'lazydiff.ops.norm', 'ops.norm', (['var1'], {'p': '(2)'}), '(var1, p=2)\n', (4790, 4801), False, 'from lazydiff import ops\n'), 
((4991, 5011), 'lazydiff.vars.Var', 'Var', (['[5, 10, 15, 20]'], {}), '([5, 10, 15, 20])\n', (4994, 5011), False, 'from lazydiff.vars import Var\n'), ((5068, 5082), 'numpy.all', 'np.all', (['(x == y)'], {}), '(x == y)\n', (5074, 5082), True, 'import numpy as np\n'), ((5153, 5173), 'lazydiff.vars.Var', 'Var', (['[5, 10, 15, 20]'], {}), '([5, 10, 15, 20])\n', (5156, 5173), False, 'from lazydiff.vars import Var\n'), ((5216, 5226), 'lazydiff.ops.tan', 'ops.tan', (['x'], {}), '(x)\n', (5223, 5226), False, 'from lazydiff import ops\n'), ((204, 225), 'pytest.approx', 'pytest.approx', (['[0, 0]'], {}), '([0, 0])\n', (217, 225), False, 'import pytest\n'), ((399, 422), 'pytest.approx', 'pytest.approx', (['[-1, -1]'], {}), '([-1, -1])\n', (412, 422), False, 'import pytest\n'), ((463, 484), 'pytest.approx', 'pytest.approx', (['[0, 0]'], {}), '([0, 0])\n', (476, 484), False, 'import pytest\n'), ((4845, 4869), 'numpy.linalg.norm', 'np.linalg.norm', (['var1.val'], {}), '(var1.val)\n', (4859, 4869), True, 'import numpy as np\n'), ((5028, 5038), 'lazydiff.ops.exp', 'ops.exp', (['x'], {}), '(x)\n', (5035, 5038), False, 'from lazydiff import ops\n'), ((5183, 5193), 'lazydiff.ops.sin', 'ops.sin', (['x'], {}), '(x)\n', (5190, 5193), False, 'from lazydiff import ops\n'), ((5196, 5206), 'lazydiff.ops.cos', 'ops.cos', (['x'], {}), '(x)\n', (5203, 5206), False, 'from lazydiff import ops\n'), ((263, 281), 'numpy.array', 'np.array', (['[-1, -1]'], {}), '([-1, -1])\n', (271, 281), True, 'import numpy as np\n'), ((944, 961), 'numpy.arccos', 'np.arccos', (['[0, 0]'], {}), '([0, 0])\n', (953, 961), True, 'import numpy as np\n'), ((1987, 2005), 'numpy.arccosh', 'np.arccosh', (['[2, 2]'], {}), '([2, 2])\n', (1997, 2005), True, 'import numpy as np\n'), ((5271, 5292), 'pytest.approx', 'pytest.approx', (['x3.val'], {}), '(x3.val)\n', (5284, 5292), False, 'import pytest\n'), ((2044, 2060), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2052, 2060), True, 'import numpy as np\n'), ((2063, 
2073), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2070, 2073), True, 'import numpy as np\n'), ((4923, 4937), 'numpy.sqrt', 'np.sqrt', (['(2 / 7)'], {}), '(2 / 7)\n', (4930, 4937), True, 'import numpy as np\n'), ((4910, 4921), 'numpy.sqrt', 'np.sqrt', (['(14)'], {}), '(14)\n', (4917, 4921), True, 'import numpy as np\n'), ((4939, 4950), 'numpy.sqrt', 'np.sqrt', (['(14)'], {}), '(14)\n', (4946, 4950), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
from dr_phil_hardware.vision.ray import Ray
from shapely.geometry import LineString
import math
from tf import transformations as t
def invert_homog_mat(hm):
    """ inverts homogenous matrix expressing rotation and translation in 3D or 2D """
    # Thin wrapper over tf.transformations; kept for a domain-specific name.
    return t.inverse_matrix(hm)
def intersect(ray1 : Ray, ray2 : Ray):
    """Return True when the finite 2D segments spanned by the two rays cross.

    Each ray is converted to the segment from its origin to its tip.
    """
    seg_a = LineString([list(ray1.origin), list(ray1.get_point())])
    seg_b = LineString([list(ray2.origin), list(ray2.get_point())])
    return seg_a.intersects(seg_b)
def subtract(ray1, ray2):
    """Return the finite ray running from the tip of ray2 to the tip of ray1.

    NOTE(review): the original doc said "ray1 - ray2 [with] the same origin";
    the code actually anchors the result at ray2's tip — confirm intent.
    """
    start = ray2.get_point()
    delta = ray1.get_point() - start
    return Ray(start, delta, np.linalg.norm(delta))
def interpolated_ray(ray1: Ray,ray2: Ray,r, newL):
    """Return the ray from the shared origin toward a point r of the way from
    ray1's tip to ray2's tip, with length newL.

    Args:
        ray1: the start ray
        ray2: the end ray
        r: interpolation ratio between the tips (0 -> ray1 tip, 1 -> ray2 tip)
        newL: the length to give to the new ray
    """
    # Rays must share an origin for tip interpolation to make sense.
    assert(np.allclose(ray1.origin,ray2.origin))
    tip_start = ray1.get_point()
    tip_end = ray2.get_point()
    blended_tip = tip_start + (tip_end - tip_start) * r
    return Ray(ray1.origin, blended_tip - ray1.origin, newL)
def angle_between(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2'::
            >>> angle_between([[1], [0], [0]], [[0], [1], [0]])
            1.5707963267948966
            >>> angle_between([[1], [0], [0]], [[1], [0], [0]])
            0.0
            >>> angle_between([[1], [0], [0]], [[-1], [0], [0]])
            3.141592653589793
    """
    # arccos of the cosine similarity between the (column-vector) inputs.
    angle = float(np.arccos((v1.T @ v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))))
    # NOTE(review): np.arccos already returns values in [0, pi], so the
    # wrap-around branch below looks unreachable — confirm before removing.
    return angle if angle < math.pi else (math.pi * 2) - angle | [
"dr_phil_hardware.vision.ray.Ray",
"numpy.allclose",
"numpy.linalg.norm",
"tf.transformations.inverse_matrix"
] | [((307, 327), 'tf.transformations.inverse_matrix', 't.inverse_matrix', (['hm'], {}), '(hm)\n', (323, 327), True, 'from tf import transformations as t\n'), ((817, 836), 'numpy.linalg.norm', 'np.linalg.norm', (['dir'], {}), '(dir)\n', (831, 836), True, 'import numpy as np\n'), ((849, 873), 'dr_phil_hardware.vision.ray.Ray', 'Ray', (['origin', 'dir', 'length'], {}), '(origin, dir, length)\n', (852, 873), False, 'from dr_phil_hardware.vision.ray import Ray\n'), ((1314, 1351), 'numpy.allclose', 'np.allclose', (['ray1.origin', 'ray2.origin'], {}), '(ray1.origin, ray2.origin)\n', (1325, 1351), True, 'import numpy as np\n'), ((1550, 1581), 'dr_phil_hardware.vision.ray.Ray', 'Ray', (['ray1.origin', 'new_dir', 'newL'], {}), '(ray1.origin, new_dir, newL)\n', (1553, 1581), False, 'from dr_phil_hardware.vision.ray import Ray\n'), ((2000, 2018), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2014, 2018), True, 'import numpy as np\n'), ((2021, 2039), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2035, 2039), True, 'import numpy as np\n')] |
from struct import Struct
from numpy import frombuffer
from pyNastran.op2.op2_interface.op2_common import OP2Common
from pyNastran.op2.op2_interface.op2_reader import mapfmt
from pyNastran.op2.tables.ogs_grid_point_stresses.ogs_surface_stresses import (
GridPointSurfaceStressesArray,
GridPointStressesVolumeDirectArray, GridPointStressesVolumePrincipalArray,
GridPointStressesSurfaceDiscontinutiesArray,
GridPointStressesVolumeDiscontinutiesArray,
# strains
GridPointSurfaceStrainsArray, GridPointStrainsVolumeDirectArray,
GridPointStrainsVolumePrincipalArray,
GridPointStrainsSurfaceDiscontinutiesArray
)
class OGS(OP2Common):
    def __init__(self):
        # Delegate all reader state setup to the shared OP2 base class.
        OP2Common.__init__(self)
    def _read_ogstr1_3(self, data: bytes, ndata: int):
        """OGSTR1 - grid point strains"""
        # Strain table-3 records share the stress table-3 layout exactly.
        self._read_ogs1_3(data, ndata)
    def _read_ogs1_3(self, data: bytes, ndata: int):
        """OGS1 - grid point stresses

        Parses the table-3 (header) record: approach code, the OGS-specific
        parameters (surface/volume id, reference frame, format), and the
        analysis-code-dependent loading parameter (load set, mode, time, step).
        """
        unused_three = self.parse_approach_code(data)
        self.words = [
            'aCode', 'tCode', '???', 'isubcase',
            '???', '???', '???', 'dLoadID',
            'format_code', 'num_wide', 'o_code', '???',
            'acoustic_flag', '???', '???', '???',
            '???', '???', '???', '???',
            '???', '???', 'thermal', '???',
            '???', 'Title', 'subtitle', 'label']
        # NOTE(review): parse_approach_code is invoked a second time here;
        # it appears redundant with the call above — confirm before removing.
        self.parse_approach_code(data)
        #isubcase = self.get_values(data, b'i', 4)
        ## surface/volumeID
        self.ogs = self.add_data_parameter(data, 'ogs_id', b'i', 3, False)
        #: Reference coordinate system ID
        self.refid = self.add_data_parameter(data, 'refid', b'i', 8, False)
        ## format code
        self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False)
        ## number of words per entry in record
        self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False)
        ## Stress/Strain code
        self.sCode = self.add_data_parameter(data, 'sCode', b'i', 11, False)
        ## Output Coordinate System
        self.oCoord = self.add_data_parameter(data, 'oCoord', b'i', 12, False)
        ## Axis Specification code
        self.axis = self.add_data_parameter(data, 'axis', b'i', 13, False)
        #: Normal Specification Code
        self.normal = self.add_data_parameter(data, 'normal', b'i', 14, False)
        self.fix_format_code()
        if not self.is_sort1:
            raise NotImplementedError('OGS sort2...')
        ## assuming tCode=1
        # The meaning of word 5 depends on the analysis code.
        if self.analysis_code == 1:  # statics
            ## load set number
            self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5, False)
            self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])
            self.setNullNonlinearFactor()
        elif self.analysis_code == 2:  # normal modes/buckling (real eigenvalues)
            ## mode number
            self.mode = self.add_data_parameter(data, 'mode', b'i', 5)
            ## real eigenvalue
            self.eign = self.add_data_parameter(data, 'eign', b'f', 6, False)
            self.mode_cycle = 0.0
            self.update_mode_cycle('mode_cycle')
            self.data_names = self.apply_data_code_value('data_names', ['mode', 'eign', 'mode_cycle'])
        #elif self.analysis_code == 3: # differential stiffness
        #elif self.analysis_code == 4: # differential stiffness
        #elif self.analysis_code == 5:   # frequency
        elif self.analysis_code == 6:  # transient
            ## time step
            self.time = self.add_data_parameter(data, 'time', b'f', 5)
            self.data_names = self.apply_data_code_value('data_names', ['time'])
        #elif self.analysis_code == 7: # pre-buckling
        #elif self.analysis_code == 8: # post-buckling
        #elif self.analysis_code == 9: # complex eigenvalues
        elif self.analysis_code == 10:  # nonlinear statics
            ## load step
            self.lftsfq = self.add_data_parameter(data, 'lftsfq', b'f', 5)
            self.data_names = self.apply_data_code_value('data_names', ['lftsfq'])
        #elif self.analysis_code == 11: # old geometric nonlinear statics
        #elif self.analysis_code == 12: # contran ? (may appear as aCode=6) --> straight from DMAP...grrr...
        else:
            raise RuntimeError('invalid analysis_code...analysis_code=%s' % self.analysis_code)
        #print "*isubcase=%s" % (self.isubcase)
        #print "analysis_code=%s table_code=%s thermal=%s" %(self.analysis_code,self.table_code,self.thermal)
        #print self.code_information()
        if self.is_debug_file:
            self.binary_debug.write('  approach_code = %r\n' % self.approach_code)
            self.binary_debug.write('  tCode    = %r\n' % self.tCode)
            self.binary_debug.write('  isubcase = %r\n' % self.isubcase)
        self._read_title(data)
        self._write_debug_bits()
def _read_ogstr1_4(self, data: bytes, ndata: int) -> int:
"""OGSTR1 - grid point strains"""
return self._read_ogs1_4(data, ndata, restype='strains')
def _read_ogs1_4(self, data: bytes, ndata: int, restype: str='stresses') -> int:
"""OGS1 - grid point stresses"""
if self.table_code == 26:
# OGS1 - grid point stresses - surface
assert self.table_name in [b'OGS1', b'OGSTR1'], f'table_name={self.table_name} table_code={self.table_code}'
n = self._read_ogs1_table26(data, ndata, restype)
elif self.table_code == 27:
#OGS1 - grid point stresses - volume direct
assert self.table_name in [b'OGS1', b'OGSTR1'], f'table_name={self.table_name} table_code={self.table_code}'
n = self._read_ogs1_table27(data, ndata, restype)
elif self.table_code == 28:
#OGS1- grid point stresses - principal
assert self.table_name in [b'OGS1', b'OGSTR1'], f'table_name={self.table_name} table_code={self.table_code}'
n = self._read_ogs1_table28(data, ndata, restype)
elif self.table_code == 35:
# OGS - Grid point stress discontinuities (plane strain)
assert self.table_name in [b'OGS1', b'OGSTR1'], f'table_name={self.table_name} table_code={self.table_code}'
n = self._read_ogs1_table35(data, ndata, restype)
else:
#msg = self.code_information()
raise RuntimeError(self.code_information())
#n = self._not_implemented_or_skip(data, ndata, msg)
del self.ogs
return n
def _read_ogs1_table28(self, data, ndata, restype: str):
if self.num_wide == 15:
n = self._read_ogs1_table28_numwide15(data, ndata, restype)
else:
raise RuntimeError(self.code_information())
return n
    def _read_ogs1_table28_numwide15(self, data, ndata, restype: str):
        """
        Reads volume principal grid point stresses/strains
        (table_code=28, num_wide=15).  Records are parsed and validated,
        but are NOT stored: the ``add_sort1`` call below is commented out.

        TCODE =28 Volume with principal
        1 EKEY I 10*grid point identification number + device code
        2 LXA RS Direction cosine from x to a
        3 LXB RS Direction cosine from x to b
        4 LXC RS Direction cosine from x to c
        5 LYA RS Direction cosine from y to a
        6 LYB RS Direction cosine from y to b
        7 LYC RS Direction cosine from y to c
        8 LZA RS Direction cosine from z to a
        9 LZB RS Direction cosine from z to b
        10 LZC RS Direction cosine from z to c
        11 SA RS Principal in a
        12 SB RS Principal in b
        13 SC RS Principal in c
        14 EPR RS Mean pressure
        15 EHVM RS Hencky-von Mises or octahedral
        """
        result_name = f'grid_point_{restype}_volume_principal'
        # select the strain or stress container from the restype string
        if 'strain' in restype:
            obj_vector_real = GridPointStrainsVolumePrincipalArray
        else:
            obj_vector_real = GridPointStressesVolumePrincipalArray
        if self._results.is_not_saved(result_name):
            self.log.warning(f'skipping {result_name}')
            return ndata
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0
        #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
        # 15 words per record; self.factor scales for 64-bit files
        ntotal = 60 * self.factor # 15 * 4
        nelements = ndata // ntotal
        assert ndata % ntotal == 0
        auto_return, is_vectorized = self._create_oes_object4(
            nelements, result_name, slot, obj_vector_real)
        if auto_return:
            return nelements * ntotal
        obj = self.obj
        dt = self.nonlinear_factor
        # NOTE: the vectorized branch is intentionally disabled by the
        # trailing ``and 0``; only the scalar Struct path below runs.
        if self.use_vector and is_vectorized and 0:
            n = nelements * ntotal
            #itotal = obj.ielement
            #ielement2 = obj.itotal + nelements
            #itotal2 = ielement2
            #floats = frombuffer(data, dtype=self.fdtype).reshape(nelements, 11).copy()
            #obj._times[obj.itime] = dt
            #if obj.itime == 0:
            #ints = frombuffer(data, dtype=self.idtype).reshape(nelements, 11).copy()
            #nids = ints[:, 0] // 10
            #eids = ints[:, 1]
            #assert nids.min() > 0, nids.min()
            #obj.node_element[itotal:itotal2, 0] = nids
            #obj.node_element[itotal:itotal2, 1] = eids
            ##[lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm]
            #strings = frombuffer(data, dtype=self._uendian + 'S4').reshape(nelements, 11)[:, 2].copy()
            #obj.location[itotal:itotal2] = strings
            #obj.data[obj.itime, itotal:itotal2, :] = floats[:, 3:]#.copy()
            #obj.itotal = itotal2
            #obj.ielement = ielement2
            #n = ndata
        else:
            # one int (key) + 14 floats per record
            s = Struct(mapfmt(self._endian + b'i14f', self.size))
            #nelements = ndata // 60 # 15*4
            for unused_i in range(nelements):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (eid_device, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm) = out
                # strip the device code from the packed key
                eid = eid_device // 10
                assert eid > 0, eid
                #self.obj.add_sort1(dt, eid, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc,
                                   #sa, sb, sc, epr, ovm)
                n += ntotal
        assert ndata > 0, ndata
        assert nelements > 0, f'nelements={nelements} element_type={self.element_type} element_name={self.element_name!r}'
        #assert ndata % ntotal == 0, '%s n=%s nwide=%s len=%s ntotal=%s' % (self.element_name, ndata % ntotal, ndata % self.num_wide, ndata, ntotal)
        assert self.num_wide * 4 * self.factor == ntotal, 'numwide*4=%s ntotal=%s' % (self.num_wide * 4, ntotal)
        assert n > 0, f'n = {n} result_name={result_name}'
        return n
#-----------------------------------------------------------------------------------
def _read_ogs1_table26(self, data: bytes, ndata: int, restype: str) -> int:
"""reads grid point stresses"""
if self.num_wide == 11: # real/random
n = self._read_ogs1_table26_numwide11(data, ndata, restype)
else:
msg = f'only num_wide=11 is allowed num_wide={self.num_wide}'
raise NotImplementedError(msg)
return n
    def _read_ogs1_table26_numwide11(self, data: bytes, ndata: int, restype: str) -> int:
        """surface stresses

        Each 11-word record is:
        [nid_device, eid, fiber(string), nx, ny, txy, angle, major, minor, tmax, ovm]
        """
        result_name = f'grid_point_surface_{restype}'
        # select the strain or stress container from the restype string
        if 'strain' in restype:
            obj_vector_real = GridPointSurfaceStrainsArray
        else:
            obj_vector_real = GridPointSurfaceStressesArray
        if self._results.is_not_saved(result_name):
            self.log.warning(f'skipping {result_name}')
            return ndata
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0
        #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
        # 11 words per record; self.factor scales for 64-bit files
        ntotal = 44 * self.factor # 4*11
        nelements = ndata // ntotal
        auto_return, is_vectorized = self._create_oes_object4(
            nelements, result_name, slot, obj_vector_real)
        if auto_return:
            return nelements * ntotal
        obj = self.obj
        dt = self.nonlinear_factor
        if self.use_vector and is_vectorized:
            # vectorized path: reinterpret the whole buffer at once
            n = nelements * ntotal
            itotal = obj.ielement
            ielement2 = obj.itotal + nelements
            itotal2 = ielement2
            floats = frombuffer(data, dtype=self.fdtype8).reshape(nelements, 11).copy()
            obj._times[obj.itime] = dt
            if obj.itime == 0:
                # node/element ids only need to be filled on the first time step
                ints = frombuffer(data, dtype=self.idtype8).reshape(nelements, 11).copy()
                nids = ints[:, 0] // 10
                eids = ints[:, 1]
                assert nids.min() > 0, nids.min()
                obj.node_element[itotal:itotal2, 0] = nids
                obj.node_element[itotal:itotal2, 1] = eids
            #[fiber, nx, ny, txy, angle, major, minor, tmax, ovm]
            # word 2 is a 4- or 8-byte string (the fiber location)
            s4 = 'S%i' % self.size
            strings = frombuffer(data, dtype=self._uendian + s4).reshape(nelements, 11)[:, 2].copy()
            obj.location[itotal:itotal2] = strings
            obj.data[obj.itime, itotal:itotal2, :] = floats[:, 3:]#.copy()
            obj.itotal = itotal2
            obj.ielement = ielement2
            n = ndata
        else:
            # scalar fallback: unpack one record at a time
            fmt = self._endian + (b'2i4s8f' if self.size == 4 else b'2q8s8d')
            s = Struct(fmt)
            nelements = ndata // ntotal # 11*4
            for unused_i in range(nelements):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (nid_device, eid, fiber, nx, ny, txy, angle, major, minor, tmax, ovm) = out
                # strip the device code from the packed key
                nid = nid_device // 10
                fiber = fiber.decode('utf-8').strip()
                assert nid > 0, nid
                self.obj.add_sort1(dt, nid, eid, fiber, nx, ny, txy,
                                   angle, major, minor, tmax, ovm)
                n += ntotal
        assert ndata > 0, ndata
        assert nelements > 0, 'nelements=%r element_type=%s element_name=%r' % (nelements, self.element_type, self.element_name)
        #assert ndata % ntotal == 0, '%s n=%s nwide=%s len=%s ntotal=%s' % (self.element_name, ndata % ntotal, ndata % self.num_wide, ndata, ntotal)
        #assert self.num_wide * 4 * self.factor == ntotal, 'numwide*4=%s ntotal=%s' % (self.num_wide * 4, ntotal)
        assert n > 0, f'n = {n} result_name={result_name}'
        return n
def _read_ogs1_table27(self, data: bytes, ndata: int, restype: str) -> int:
"""OGS1 - grid point stresses - volume direct"""
#is_sort1 = self.is_sort1
if self.num_wide == 9: # real/random
#result_name = 'grid_point_stresses_volume_direct'
n = self._read_ogs1_table27_numwide9(data, ndata, restype)
else:
msg = self.code_information()
#msg = 'only num_wide=9 is allowed num_wide=%s' % self.num_wide
raise RuntimeError(msg)
return n
    def _read_ogs1_table27_numwide9(self, data: bytes, ndata: int, restype: str) -> int:
        """
        Reads volume direct grid point stresses/strains
        (table_code=27, num_wide=9).

        TCODE =27 Volume with direct
        1 EKEY I 10*grid point identification number + Device Code
        2 NX RS Normal in x
        3 NY RS Normal in y
        4 NZ RS Normal in z
        5 TXY RS Shear in xy
        6 TYZ RS Shear in yz
        7 TZX RS Shear in zx
        8 PR RS Mean pressure
        9 HVM RS Hencky-von Mises or Octahedral
        """
        result_name = f'grid_point_{restype}_volume_direct'
        if self._results.is_not_saved(result_name):
            self.log.warning(f'skipping {result_name}')
            return ndata
        # select the strain or stress container from the restype string
        if 'strain' in restype:
            obj_vector_real = GridPointStrainsVolumeDirectArray
        else:
            obj_vector_real = GridPointStressesVolumeDirectArray
        self._results._found_result(result_name)
        slot = getattr(self, result_name)
        n = 0
        #result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
        # 9 words per record; self.factor scales for 64-bit files
        ntotal = 36 * self.factor # 9 * 4
        nelements = ndata // ntotal
        assert ndata % (nelements * ntotal) == 0, ndata % (nelements * ntotal)
        auto_return, is_vectorized = self._create_oes_object4(
            nelements, result_name, slot, obj_vector_real)
        if auto_return:
            return nelements * ntotal
        obj = self.obj
        dt = self.nonlinear_factor
        if self.use_vector and is_vectorized:
            # vectorized path: reinterpret the whole buffer at once
            n = nelements * ntotal
            itotal = obj.ielement
            ielement2 = obj.itotal + nelements
            itotal2 = ielement2
            floats = frombuffer(data, dtype=self.fdtype8).reshape(nelements, 9)#.copy()
            obj._times[obj.itime] = dt
            if obj.itime == 0:
                # node ids only need to be filled on the first time step
                ints = frombuffer(data, dtype=self.idtype8).reshape(nelements, 9)
                nids = ints[:, 0] // 10
                assert nids.min() > 0, nids.min()
                obj.node[itotal:itotal2] = nids
            #[nid, nx, ny, nz, txy, tyz, txz, pressure, ovm]
            #strings = frombuffer(data, dtype=self._uendian + 'S4').reshape(nelements, 11)[:, 2].copy()
            #obj.location[itotal:itotal2] = strings
            obj.data[obj.itime, itotal:itotal2, :] = floats[:, 1:]#.copy()
            obj.itotal = itotal2
            obj.ielement = ielement2
            n = ndata
        else:
            # scalar fallback: one int (key) + 8 floats per record
            fmt = mapfmt(self._endian + b'i8f', self.size)
            s = Struct(fmt)
            for unused_i in range(nelements):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (nid_device, nx, ny, nz, txy, tyz, txz, pressure, ovm) = out
                # strip the device code from the packed key
                nid = nid_device // 10
                assert nid > 0, nid
                self.obj.add_sort1(dt, nid, nx, ny, nz, txy, tyz, txz, pressure, ovm)
                n += ntotal
        return n
def _read_ogs1_table35(self, data: bytes, ndata: int, restype: str) -> int:
"""
grid point stress discontinuities (plane stress/strain)
TCODE =35 Grid point stresses for surfaces with plane strain
1 EKEY I 10*grid point identification number and grid code
2 NX RS Normal in x
3 NY RS Normal in y
4 NZ RS Normal in z (always -1)
5 TXY RS Shear in xy
6 PR RS Mean pressure (always -1)
"""
if restype in 'strains':
result_name = 'grid_point_strain_discontinuities'
else:
result_name = 'grid_point_stress_discontinuities'
if self._results.is_not_saved(result_name):
self.log.warning(f'skipping {result_name}')
return ndata
self._results._found_result(result_name)
slot = getattr(self, result_name)
n = 0
if self.num_wide == 6:
if 'strain' in restype:
obj_vector_real = GridPointStrainsSurfaceDiscontinutiesArray
else:
obj_vector_real = GridPointStressesSurfaceDiscontinutiesArray
#result_name, is_random = self._apply_oes_ato_crm_psd_rms_no(result_name)
ntotal = 6 * 4 * self.factor
nelements = ndata // ntotal
assert ndata % (nelements * ntotal) == 0, ndata % (nelements * ntotal)
auto_return, is_vectorized = self._create_oes_object4(
nelements, result_name, slot, obj_vector_real)
if auto_return:
return nelements * ntotal
obj = self.obj
dt = self.nonlinear_factor
if self.use_vector and is_vectorized:
n = nelements * ntotal
itotal = obj.ielement
ielement2 = obj.itotal + nelements
itotal2 = ielement2
floats = frombuffer(data, dtype=self.fdtype).reshape(nelements, 6)#.copy()
obj._times[obj.itime] = dt
if obj.itime == 0:
ints = frombuffer(data, dtype=self.idtype).reshape(nelements, 6)
nids = ints[:, 0] // 10
assert nids.min() > 0, nids.min()
obj.node[itotal:itotal2] = nids
#[nid, nx, ny, nz, txy, pressure]
obj.data[obj.itime, itotal:itotal2, :] = floats[:, 1:]#.copy()
obj.itotal = itotal2
obj.ielement = ielement2
n = ndata
else:
s = Struct(mapfmt(self._endian + b'i5f', self.size))
nelements = ndata // ntotal # 6*4
for unused_i in range(nelements):
out = s.unpack(data[n:n+ntotal])
(nid_device, nx, ny, nz, txy, pressure) = out
nid = nid_device // 10
assert nid > 0, nid
self.obj.add_sort1(dt, nid, nx, ny, nz, txy, pressure)
n += ntotal
else:
msg = 'only num_wide=11 is allowed num_wide=%s' % self.num_wide
raise RuntimeError(msg)
return n
| [
"struct.Struct",
"numpy.frombuffer",
"pyNastran.op2.op2_interface.op2_reader.mapfmt",
"pyNastran.op2.op2_interface.op2_common.OP2Common.__init__"
] | [((695, 719), 'pyNastran.op2.op2_interface.op2_common.OP2Common.__init__', 'OP2Common.__init__', (['self'], {}), '(self)\n', (713, 719), False, 'from pyNastran.op2.op2_interface.op2_common import OP2Common\n'), ((13400, 13411), 'struct.Struct', 'Struct', (['fmt'], {}), '(fmt)\n', (13406, 13411), False, 'from struct import Struct\n'), ((17425, 17465), 'pyNastran.op2.op2_interface.op2_reader.mapfmt', 'mapfmt', (["(self._endian + b'i8f')", 'self.size'], {}), "(self._endian + b'i8f', self.size)\n", (17431, 17465), False, 'from pyNastran.op2.op2_interface.op2_reader import mapfmt\n'), ((17482, 17493), 'struct.Struct', 'Struct', (['fmt'], {}), '(fmt)\n', (17488, 17493), False, 'from struct import Struct\n'), ((9665, 9706), 'pyNastran.op2.op2_interface.op2_reader.mapfmt', 'mapfmt', (["(self._endian + b'i14f')", 'self.size'], {}), "(self._endian + b'i14f', self.size)\n", (9671, 9706), False, 'from pyNastran.op2.op2_interface.op2_reader import mapfmt\n'), ((16651, 16687), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'self.fdtype8'}), '(data, dtype=self.fdtype8)\n', (16661, 16687), False, 'from numpy import frombuffer\n'), ((20443, 20483), 'pyNastran.op2.op2_interface.op2_reader.mapfmt', 'mapfmt', (["(self._endian + b'i5f')", 'self.size'], {}), "(self._endian + b'i5f', self.size)\n", (20449, 20483), False, 'from pyNastran.op2.op2_interface.op2_reader import mapfmt\n'), ((16811, 16847), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'self.idtype8'}), '(data, dtype=self.idtype8)\n', (16821, 16847), False, 'from numpy import frombuffer\n'), ((19785, 19820), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'self.fdtype'}), '(data, dtype=self.fdtype)\n', (19795, 19820), False, 'from numpy import frombuffer\n'), ((12402, 12438), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'self.fdtype8'}), '(data, dtype=self.fdtype8)\n', (12412, 12438), False, 'from numpy import frombuffer\n'), ((19956, 19991), 'numpy.frombuffer', 'frombuffer', (['data'], 
{'dtype': 'self.idtype'}), '(data, dtype=self.idtype)\n', (19966, 19991), False, 'from numpy import frombuffer\n'), ((12562, 12598), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'self.idtype8'}), '(data, dtype=self.idtype8)\n', (12572, 12598), False, 'from numpy import frombuffer\n'), ((12995, 13037), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': '(self._uendian + s4)'}), '(data, dtype=self._uendian + s4)\n', (13005, 13037), False, 'from numpy import frombuffer\n')] |
import numpy as np
def naive_contrast_image(image):
    """Linearly stretch the intensity range of ``image`` onto [0, 255].

    Every pixel ``p`` is mapped to ``255 * (p - min) / (max - min)`` and
    truncated to ``uint8`` — the same mapping as the original per-pixel
    Python loop, but vectorized (one numpy pass instead of O(H*W) Python
    iterations) and safe for arrays of any shape.

    A constant image (``max == min``) is returned as all zeros instead of
    dividing by zero.
    """
    # convert to Python ints so the arithmetic below cannot wrap in uint8
    min_color = int(np.min(image))
    max_color = int(np.max(image))
    delta_color = max_color - min_color
    if delta_color == 0:
        # flat image: nothing to stretch, avoid division by zero
        return np.zeros(image.shape, dtype=np.uint8)
    # widen to int64 first so 255 * diff cannot overflow narrow dtypes
    stretched = 255 * (image.astype(np.int64) - min_color) / delta_color
    # astype truncates toward zero, matching assignment into a uint8 array
    return stretched.astype(np.uint8)
| [
"numpy.max",
"numpy.zeros",
"numpy.min"
] | [((66, 103), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.uint8'}), '(image.shape, dtype=np.uint8)\n', (74, 103), True, 'import numpy as np\n'), ((131, 144), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (137, 144), True, 'import numpy as np\n'), ((146, 159), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (152, 159), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import cv2
import time
import numpy as np
import pathmagic # noqa
import panorama._refmodels.face.detect_face as detect_face
import panorama._refmodels.face.facenet as facenet
from panorama._refmodels.face.facenet import prewhiten
slim = tf.contrib.slim
class RefFaceDetector(object):
    """Reference face detector built on the three-stage MTCNN cascade."""

    def __init__(self,
                 mtcnn_weights
                 ):
        self.init_cnn_graph(mtcnn_weights)
        # MTCNN cascade parameters
        self.minsize = 20  # minimum size of face
        self.threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        self.factor = 0.709  # scale factor
        self.detection_window_size_ratio = 1 / 8  # 20/160
        self.class_names = ['face']

    def init_cnn_graph(self, checkpoints_path):
        """Create a dedicated TF graph/session and load the P/R/O nets into it."""
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            nets = detect_face.create_mtcnn(self.sess, checkpoints_path)
            self.pnet, self.rnet, self.onet = nets

    def load_from_disk(self, image_path):
        """Read an image file from disk and return it as an RGB array."""
        bgr = cv2.imread(image_path)
        return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    def predict(self, image_data):
        """Detect faces; returns (boxes, scores, classes, duration)."""
        bounding_boxes, _, duration = detect_face.detect_face(
            image_data, self.minsize, self.pnet, self.rnet, self.onet,
            self.threshold, self.factor)
        out_boxes = np.array([box[:4] for box in bounding_boxes])
        out_scores = np.array([box[4] for box in bounding_boxes])
        # a single "face" class: every detection gets class index 0
        out_classes = np.array([0] * bounding_boxes.shape[0])
        return out_boxes, out_scores, out_classes, duration
class RefFaceExtractor(object):
    """Facenet-based embedding extractor for aligned face crops."""

    def __init__(self,
                 facenet_weights
                 ):
        self.init_cnn_graph(facenet_weights)
        self.image_size = (160, 160)

    def init_cnn_graph(self, checkpoints_path):
        """Load the facenet model and cache its input/output tensors."""
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            print('Loading feature extraction model')
            facenet.load_model(checkpoints_path)
            graph = tf.get_default_graph()
            self.images_placeholder = graph.get_tensor_by_name("input:0")
            self.embeddings = graph.get_tensor_by_name("embeddings:0")
            self.phase_train_placeholder = graph.get_tensor_by_name(
                "phase_train:0")
            self.embedding_size = self.embeddings.get_shape()[1]

    def load_from_disk(self, image_path):
        """Read an image file and prepare it for the network."""
        bgr = cv2.imread(image_path)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        return self.load_from_cv2(rgb)

    def load_from_cv2(self, image_cv2):
        """Resize, whiten and reshape an RGB array into a 1-image batch."""
        resized = cv2.resize(image_cv2, self.image_size)
        whitened = prewhiten(resized)
        return whitened.reshape(-1, self.image_size[0], self.image_size[1], 3)

    def load_from_pil(self, image_pil):
        """Convert a PIL image into a prepared 1-image batch."""
        return self.load_from_cv2(np.array(image_pil, dtype=np.float32))

    def extract_features(self, image_data):
        """Run the facenet forward pass; returns (embedding, duration)."""
        feed_dict = {self.images_placeholder: image_data,
                     self.phase_train_placeholder: False}
        start = time.time()
        emb = self.sess.run(self.embeddings, feed_dict=feed_dict)[0]
        duration = time.time() - start
        return emb.astype(np.float32), duration
| [
"panorama._refmodels.face.detect_face.create_mtcnn",
"tensorflow.Graph",
"panorama._refmodels.face.facenet.prewhiten",
"tensorflow.Session",
"panorama._refmodels.face.detect_face.detect_face",
"numpy.array",
"cv2.cvtColor",
"time.time",
"panorama._refmodels.face.facenet.load_model",
"cv2.resize",
... | [((868, 878), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (876, 878), True, 'import tensorflow as tf\n'), ((899, 927), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (909, 927), True, 'import tensorflow as tf\n'), ((1187, 1209), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1197, 1209), False, 'import cv2\n'), ((1231, 1274), 'cv2.cvtColor', 'cv2.cvtColor', (['image_data', 'cv2.COLOR_BGR2RGB'], {}), '(image_data, cv2.COLOR_BGR2RGB)\n', (1243, 1274), False, 'import cv2\n'), ((1375, 1490), 'panorama._refmodels.face.detect_face.detect_face', 'detect_face.detect_face', (['image_data', 'self.minsize', 'self.pnet', 'self.rnet', 'self.onet', 'self.threshold', 'self.factor'], {}), '(image_data, self.minsize, self.pnet, self.rnet,\n self.onet, self.threshold, self.factor)\n', (1398, 1490), True, 'import panorama._refmodels.face.detect_face as detect_face\n'), ((1592, 1655), 'numpy.array', 'np.array', (['[bounding_box[:4] for bounding_box in bounding_boxes]'], {}), '([bounding_box[:4] for bounding_box in bounding_boxes])\n', (1600, 1655), True, 'import numpy as np\n'), ((1707, 1769), 'numpy.array', 'np.array', (['[bounding_box[4] for bounding_box in bounding_boxes]'], {}), '([bounding_box[4] for bounding_box in bounding_boxes])\n', (1715, 1769), True, 'import numpy as np\n'), ((1823, 1862), 'numpy.array', 'np.array', (['([0] * bounding_boxes.shape[0])'], {}), '([0] * bounding_boxes.shape[0])\n', (1831, 1862), True, 'import numpy as np\n'), ((2187, 2197), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2195, 2197), True, 'import tensorflow as tf\n'), ((2218, 2246), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (2228, 2246), True, 'import tensorflow as tf\n'), ((2842, 2864), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2852, 2864), False, 'import cv2\n'), ((2881, 2919), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 
'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2893, 2919), False, 'import cv2\n'), ((3054, 3092), 'cv2.resize', 'cv2.resize', (['image_cv2', 'self.image_size'], {}), '(image_cv2, self.image_size)\n', (3064, 3092), False, 'import cv2\n'), ((3113, 3133), 'panorama._refmodels.face.facenet.prewhiten', 'prewhiten', (['image_cv2'], {}), '(image_cv2)\n', (3122, 3133), False, 'from panorama._refmodels.face.facenet import prewhiten\n'), ((3347, 3384), 'numpy.array', 'np.array', (['image_pil'], {'dtype': 'np.float32'}), '(image_pil, dtype=np.float32)\n', (3355, 3384), True, 'import numpy as np\n'), ((3648, 3659), 'time.time', 'time.time', ([], {}), '()\n', (3657, 3659), False, 'import time\n'), ((1048, 1101), 'panorama._refmodels.face.detect_face.create_mtcnn', 'detect_face.create_mtcnn', (['self.sess', 'checkpoints_path'], {}), '(self.sess, checkpoints_path)\n', (1072, 1101), True, 'import panorama._refmodels.face.detect_face as detect_face\n'), ((2351, 2387), 'panorama._refmodels.face.facenet.load_model', 'facenet.load_model', (['checkpoints_path'], {}), '(checkpoints_path)\n', (2369, 2387), True, 'import panorama._refmodels.face.facenet as facenet\n'), ((3748, 3759), 'time.time', 'time.time', ([], {}), '()\n', (3757, 3759), False, 'import time\n'), ((2444, 2466), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2464, 2466), True, 'import tensorflow as tf\n'), ((2545, 2567), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2565, 2567), True, 'import tensorflow as tf\n'), ((2646, 2668), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2666, 2668), True, 'import tensorflow as tf\n')] |
import numpy as np
import minibatch
import sys
import cv2
sys.path.append("../")
from config import config
class TestLoader:
    """Sequentially serves single test images read from disk with cv2."""

    def __init__(self, imdb, batch_size=1, shuffle=False):
        self.imdb = imdb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.size = len(imdb)  # number of images
        self.cur = 0
        self.data = None
        self.label = None
        self.reset()
        self.get_batch()

    def reset(self):
        """Rewind to the first image; reshuffle the image list if requested."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.imdb)

    def iter_next(self):
        """True while a full batch remains."""
        return self.cur + self.batch_size <= self.size

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return the current image and advance the cursor."""
        if not self.iter_next():
            raise StopIteration
        self.get_batch()
        self.cur += self.batch_size
        return self.data

    def getindex(self):
        return self.cur / self.batch_size

    def getpad(self):
        overshoot = self.cur + self.batch_size - self.size
        return overshoot if overshoot > 0 else 0

    def get_batch(self):
        """Load the image at the current cursor position into ``self.data``."""
        self.data = cv2.imread(self.imdb[self.cur])
class ImageLoader:
    """Shuffling minibatch loader yielding (data, [label, bbox_target])."""

    def __init__(self, imdb, im_size, batch_size=config.BATCH_SIZE, shuffle=False):
        self.imdb = imdb
        self.batch_size = batch_size
        self.im_size = im_size
        self.shuffle = shuffle
        self.cur = 0
        self.size = len(imdb)
        self.index = np.arange(self.size)
        self.num_classes = 2
        self.batch = None
        self.data = None
        self.label = None
        self.label_names = ['label', 'bbox_target']
        self.reset()
        self.get_batch()

    def reset(self):
        """Rewind the cursor; reshuffle the index permutation if requested."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        """True while a full batch remains."""
        return self.cur + self.batch_size <= self.size

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return (data, label) for the current batch and advance the cursor."""
        if not self.iter_next():
            raise StopIteration
        self.get_batch()
        self.cur += self.batch_size
        return self.data, self.label

    def getindex(self):
        return self.cur / self.batch_size

    def getpad(self):
        overshoot = self.cur + self.batch_size - self.size
        return overshoot if overshoot > 0 else 0

    def get_batch(self):
        """Assemble the next minibatch from the (possibly shuffled) indices."""
        start = self.cur
        stop = min(start + self.batch_size, self.size)
        batch_imdb = [self.imdb[self.index[i]] for i in range(start, stop)]
        data, label = minibatch.get_minibatch(batch_imdb, self.num_classes, self.im_size)
        self.data = data['data']
        self.label = [label[name] for name in self.label_names]
| [
"cv2.imread",
"minibatch.get_minibatch",
"sys.path.append",
"numpy.arange",
"numpy.random.shuffle"
] | [((58, 80), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (73, 80), False, 'import sys\n'), ((1651, 1667), 'cv2.imread', 'cv2.imread', (['imdb'], {}), '(imdb)\n', (1661, 1667), False, 'import cv2\n'), ((1993, 2013), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (2002, 2013), True, 'import numpy as np\n'), ((3159, 3220), 'minibatch.get_minibatch', 'minibatch.get_minibatch', (['imdb', 'self.num_classes', 'self.im_size'], {}), '(imdb, self.num_classes, self.im_size)\n', (3182, 3220), False, 'import minibatch\n'), ((604, 632), 'numpy.random.shuffle', 'np.random.shuffle', (['self.imdb'], {}), '(self.imdb)\n', (621, 632), True, 'import numpy as np\n'), ((2300, 2329), 'numpy.random.shuffle', 'np.random.shuffle', (['self.index'], {}), '(self.index)\n', (2317, 2329), True, 'import numpy as np\n')] |
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: <NAME>
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
def frustum(left, right, bottom, top, znear, zfar):
    """Return the 4x4 perspective projection matrix of a viewing frustum.

    Built directly in row-major layout (the original assembled the
    transpose and returned ``M.T``); same numbers, same matrix.
    """
    rl = right - left
    tb = top - bottom
    fn = zfar - znear
    M = np.zeros((4, 4))
    M[0, 0] = +2.0 * znear / rl
    M[0, 2] = (right + left) / rl
    M[1, 1] = +2.0 * znear / tb
    M[1, 2] = (top + bottom) / tb
    M[2, 2] = -(zfar + znear) / fn
    M[2, 3] = -2.0 * znear * zfar / fn
    M[3, 2] = -1.0
    return M
def perspective(fovy, aspect, znear, zfar):
    """Perspective projection from a vertical field of view in degrees.

    The symmetric frustum (left = -right, bottom = -top) is inlined: the
    off-diagonal frustum terms vanish, and 2*znear/(2*w) reduces to
    znear/w, so the result is identical to frustum(-w, w, -h, h, ...).
    """
    h = np.tan(fovy / 360.0 * np.pi) * znear
    w = h * aspect
    M = np.zeros((4, 4))
    M[0, 0] = znear / w
    M[1, 1] = znear / h
    M[2, 2] = -(zfar + znear) / (zfar - znear)
    M[2, 3] = -2.0 * znear * zfar / (zfar - znear)
    M[3, 2] = -1.0
    return M
def scale(x, y, z):
    """Return the 4x4 homogeneous scaling matrix diag(x, y, z, 1)."""
    return np.diag(np.array([x, y, z, 1], dtype=float))


def zoom(z):
    """Uniform scaling by factor ``z`` along all three axes."""
    return scale(z, z, z)
def translate(x, y, z):
    """Return the 4x4 homogeneous translation matrix moving by (x, y, z)."""
    T = np.eye(4)
    T[:3, 3] = (x, y, z)
    return T
def xrotate(theta):
    """Rotation about the x axis by ``theta`` degrees (homogeneous 4x4)."""
    t = np.pi * theta / 180
    c, s = np.cos(t), np.sin(t)
    R = np.eye(4)
    R[1, 1], R[1, 2] = c, -s
    R[2, 1], R[2, 2] = s, c
    return R
def yrotate(theta):
    """Rotation about the y axis by ``theta`` degrees (homogeneous 4x4)."""
    t = np.pi * theta / 180
    c, s = np.cos(t), np.sin(t)
    R = np.eye(4)
    R[0, 0], R[0, 2] = c, s
    R[2, 0], R[2, 2] = -s, c
    return R
def obj_load(filename):
    """Load vertices and triangle faces from a Wavefront OBJ file.

    Returns ``(V, Vi)``: ``V`` is an (n, 3) float array of vertex
    positions and ``Vi`` an (m, 3) int array of zero-based vertex
    indices (OBJ indices are one-based, hence the ``- 1``).

    Face records of the form ``f 1/1/1 2/2/2 3/3/3`` (vertex/texture/
    normal triplets) are accepted in addition to plain ``f 1 2 3``; only
    the vertex index is kept.  Only the first three vertices of each face
    are read, as before.
    """
    V, Vi = [], []
    with open(filename) as f:
        for line in f:
            values = line.split()
            # skip blank lines and comment lines
            if not values or values[0].startswith("#"):
                continue
            if values[0] == "v":
                V.append([float(x) for x in values[1:4]])
            elif values[0] == "f":
                # "1/2/3" -> 1 : keep only the vertex index of each triplet
                Vi.append([int(x.split("/")[0]) for x in values[1:4]])
    return np.array(V), np.array(Vi) - 1
# -----------------------------------------------------------------------------
# Loading and centering: shift the mesh so its bounding box is centered at the
# origin and scale it so its largest extent is 1
V, Vi = obj_load("bunny.obj")
V = (V - (V.max(axis=0) + V.min(axis=0)) / 2) / max(V.max(axis=0) - V.min(axis=0))
# Computing model-view-projection matrix
model = zoom(1.5) @ xrotate(20) @ yrotate(45)
view = translate(0, 0, -4.5)
proj = perspective(25, 1, 1, 100)
MVP = proj @ view @ model
# Applying MVP to every vertex
VH = np.c_[V, np.ones(len(V))] # Homogeneous coordinates
VT = VH @ MVP.T # Transformed coordinates
VN = VT / VT[:, 3].reshape(-1, 1) # Perspective divide by w
VS = VN[:, :3] # Normalized device coordinates
# Actual faces: gather the three projected vertices of each triangle
V = VS[Vi]
# Backface culling: CW is twice the signed screen-space area of each triangle
# (shoelace formula); keep only the front-facing (counter-clockwise) ones
CW = (
    (V[:, 1, 0] - V[:, 0, 0]) * (V[:, 1, 1] + V[:, 0, 1])
    + (V[:, 2, 0] - V[:, 1, 0]) * (V[:, 2, 1] + V[:, 1, 1])
    + (V[:, 0, 0] - V[:, 2, 0]) * (V[:, 0, 1] + V[:, 2, 1])
)
V = V[CW < 0]
# Rendering as a collection of polygons (triangles): xy for the outline,
# mean depth for shading/ordering
segments = V[:, :, :2]
zbuffer = -V[:, :, 2].mean(axis=1)
# Color according to depth (normalized to [0, 1] for the colormap)
zmin, zmax = zbuffer.min(), zbuffer.max()
zbuffer = (zbuffer - zmin) / (zmax - zmin)
colors = plt.get_cmap("magma")(zbuffer)
# Sort triangles according to z buffer (painter's algorithm: far first)
I = np.argsort(zbuffer)
segments, colors = segments[I, :], colors[I, :]
# Actual rendering: three passes give a thick black outline, a thinner white
# outline, then the depth-colored triangles on top
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0, 0, 1, 1], xlim=[-1, +1], ylim=[-1, +1], aspect=1)
ax.axis("off")
for fc, ec, lw in [
    ("None", "black", 6.0),
    ("None", "white", 3.0),
    (colors, "black", 0.25),
]:
    collection = PolyCollection(
        segments, closed=True, linewidth=lw, facecolor=fc, edgecolor=ec
    )
    ax.add_collection(collection)
plt.savefig("../../figures/threed/bunny.pdf", transparent=True)
plt.show()
| [
"matplotlib.pyplot.savefig",
"numpy.tan",
"matplotlib.collections.PolyCollection",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.show"
] | [((3222, 3241), 'numpy.argsort', 'np.argsort', (['zbuffer'], {}), '(zbuffer)\n', (3232, 3241), True, 'import numpy as np\n'), ((3316, 3342), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3326, 3342), True, 'import matplotlib.pyplot as plt\n'), ((3685, 3748), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../../figures/threed/bunny.pdf"""'], {'transparent': '(True)'}), "('../../figures/threed/bunny.pdf', transparent=True)\n", (3696, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3749, 3759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3757, 3759), True, 'import matplotlib.pyplot as plt\n'), ((412, 428), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (420, 428), True, 'import numpy as np\n'), ((930, 1009), 'numpy.array', 'np.array', (['[[x, 0, 0, 0], [0, y, 0, 0], [0, 0, z, 0], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[x, 0, 0, 0], [0, y, 0, 0], [0, 0, z, 0], [0, 0, 0, 1]], dtype=float)\n', (938, 1009), True, 'import numpy as np\n'), ((1102, 1181), 'numpy.array', 'np.array', (['[[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]], dtype=float)\n', (1110, 1181), True, 'import numpy as np\n'), ((1289, 1374), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]], dtype=float\n )\n', (1297, 1374), True, 'import numpy as np\n'), ((1477, 1562), 'numpy.array', 'np.array', (['[[c, 0, s, 0], [0, 1, 0, 0], [-s, 0, c, 0], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[c, 0, s, 0], [0, 1, 0, 0], [-s, 0, c, 0], [0, 0, 0, 1]], dtype=float\n )\n', (1485, 1562), True, 'import numpy as np\n'), ((3147, 3168), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma"""'], {}), "('magma')\n", (3159, 3168), True, 'import matplotlib.pyplot as plt\n'), ((3556, 3635), 'matplotlib.collections.PolyCollection', 
'PolyCollection', (['segments'], {'closed': '(True)', 'linewidth': 'lw', 'facecolor': 'fc', 'edgecolor': 'ec'}), '(segments, closed=True, linewidth=lw, facecolor=fc, edgecolor=ec)\n', (3570, 3635), False, 'from matplotlib.collections import PolyCollection\n'), ((795, 823), 'numpy.tan', 'np.tan', (['(fovy / 360.0 * np.pi)'], {}), '(fovy / 360.0 * np.pi)\n', (801, 823), True, 'import numpy as np\n'), ((1257, 1266), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1263, 1266), True, 'import numpy as np\n'), ((1268, 1277), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1274, 1277), True, 'import numpy as np\n'), ((1445, 1454), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1451, 1454), True, 'import numpy as np\n'), ((1456, 1465), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1462, 1465), True, 'import numpy as np\n'), ((2024, 2035), 'numpy.array', 'np.array', (['V'], {}), '(V)\n', (2032, 2035), True, 'import numpy as np\n'), ((2037, 2049), 'numpy.array', 'np.array', (['Vi'], {}), '(Vi)\n', (2045, 2049), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from scipy.optimize import linprog
from pandapower.estimation.algorithm.matrix_base import BaseAlgebra
from pandapower.estimation.algorithm.base import BaseAlgorithm
class LPAlgorithm(BaseAlgorithm):
    """LAV (least-absolute-value) state estimator solved via linear programming.

    Each iteration linearises the measurement model around the current state
    and obtains the state update by minimising the L1 norm of the residuals
    with scipy's simplex ``linprog``.
    """

    def estimate(self, eppci, **kwargs):
        """Iterate the LAV estimation until convergence or iteration limit.

        :param eppci: preprocessed internal grid/measurement container
        :return: the updated ``eppci`` on success, ``False`` on solver failure
        """
        if "estimator" in kwargs and kwargs["estimator"].lower() != "lav":  # pragma: no cover
            self.logger.warning("LP Algorithm supports only LAV Estimator!! Set to LAV!!")

        # algebra helper building residuals and jacobians for the grid model
        sem = BaseAlgebra(eppci)

        E = eppci.E
        delta_max, iteration = 100., 0
        while delta_max > self.tolerance and iteration < self.max_iterations:
            self.logger.debug("Starting iteration {:d}".format(1 + iteration))
            try:
                residual = sem.create_rx(E)           # residual r
                jacobian = sem.create_hx_jacobian(E)  # jacobian matrix H
                # state vector difference d_E from the linear program
                d_E = self.solve_lp(jacobian, E, residual)
                E += d_E
                eppci.update_E(E)
                # prepare next iteration
                iteration += 1
                delta_max = np.max(np.abs(d_E))
                self.logger.debug("Current error: {:.7f}".format(delta_max))
            except np.linalg.LinAlgError:  # pragma: no cover
                self.logger.error("A problem appeared while using the linear algebra methods."
                                  "Check and change the measurement set.")
                return False

        # check if the estimation is successful
        self.check_result(delta_max, iteration)
        return eppci

    def solve_lp(self, H, x, r):
        """Solve one LAV step ``min |r - H d|_1`` as an LP.

        The unconstrained update ``d`` and the residual slacks are each split
        into positive/negative parts so the objective becomes linear:
        cost 0 on the state parts, cost 1 on the slacks, subject to
        ``H (d+ - d-) + (s+ - s-) = r``.

        :param H: measurement jacobian, shape (m, n)
        :param x: current state vector (unused; kept for interface symmetry)
        :param r: residual vector of length m
        :return: state difference minimising the absolute residuals
        :raises np.linalg.LinAlgError: if the LP solver does not succeed
        """
        m, n = H.shape
        identity_m = np.eye(m)
        cost = np.concatenate([np.zeros(2 * n), np.ones(2 * m)])
        A_eq = np.hstack([H, -H, identity_m, -identity_m])
        res = linprog(cost, A_eq=A_eq, b_eq=r,
                      method="simplex", options={'tol': 1e-5, 'disp': True, 'maxiter': 20000})
        if not res.success:  # pragma: no cover
            raise np.linalg.LinAlgError
        solution = np.asarray(res['x']).ravel()
        return solution[:n] - solution[n:2 * n]
| [
"numpy.abs",
"numpy.eye",
"numpy.ones",
"pandapower.estimation.algorithm.matrix_base.BaseAlgebra",
"numpy.array",
"numpy.zeros"
] | [((688, 706), 'pandapower.estimation.algorithm.matrix_base.BaseAlgebra', 'BaseAlgebra', (['eppci'], {}), '(eppci)\n', (699, 706), False, 'from pandapower.estimation.algorithm.matrix_base import BaseAlgebra\n'), ((1968, 1984), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (1976, 1984), True, 'import numpy as np\n'), ((2001, 2016), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (2008, 2016), True, 'import numpy as np\n'), ((2030, 2039), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (2036, 2039), True, 'import numpy as np\n'), ((1398, 1409), 'numpy.abs', 'np.abs', (['d_E'], {}), '(d_E)\n', (1404, 1409), True, 'import numpy as np\n'), ((2314, 2336), 'numpy.array', 'np.array', (["res['x'][:n]"], {}), "(res['x'][:n])\n", (2322, 2336), True, 'import numpy as np\n'), ((2347, 2374), 'numpy.array', 'np.array', (["res['x'][n:2 * n]"], {}), "(res['x'][n:2 * n])\n", (2355, 2374), True, 'import numpy as np\n')] |
import socket
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from src.algorithms.QDoubleDeepLearn import QLearn # can be QLearn, QDeepLearn, QDoubleDeepLearn or RandomAgent
from src.environments.jsbsim.JSBSimEnv import Env # can be jsbsim.JSBSimEnv or xplane.XPlaneEnv
from src.scenarios.deltaAttitudeControlScene import Scene # can be deltaAttitudeControlScene, sparseAttitudeControlScene or cheatingAttitudeControlScene
# --- Experiment bookkeeping ---
experimentName = "Experiment"
connectAttempts = 0.0 # counts every time the UDP packages are lost on a single retry
notes = "This experiment was run with..." # add notes that will be saved to the setup file to clarify the experiment setup better
dateTime = str(time.ctime(time.time()))
dateTime = dateTime.replace(":", "-")  # ':' is not valid in Windows file names
dateTime = dateTime.replace(" ", "_")
experimentName = experimentName + "-" + dateTime
errors = 0.0 # counts every time the UDP packages are lost on all retries
timeStart = time.time() # used to measure time
timeEnd = time.time() # used to measure time
# --- Logging / persistence cadence ---
logPeriod = 100 # every so many epochs the metrics will be printed into the console
savePeriod = 25 # every so many epochs the table/model will be saved to a file
movingRate = 1 # Is multiplied with savePeriod The rate at which the metrics will be averaged and saved and plotted.
pauseDelay = 0.1 # time an action is being applied to the environment
logDecimals = 0 # sets decimals for np.arrays to X for printing
np.set_printoptions(precision=logDecimals) # sets decimals for np.arrays to X for printing
# --- Q-learning hyperparameters ---
n_epochs = 50_000 # Number of generations
n_steps = 1_000 # Number of inputs per generation
n_actions = 4 # Number of possible inputs to choose from
n_states = 182 # Number of states for non-Deep QLearning
gamma = 0.75 # The discount rate - between 0 an 1! if = 0 then no learning, ! The higher it is the more the new q will factor into the update of the q value
lr = 0.0001 # Learning Rate. Deep ~0.0001 / non-Deep ~0.01 - If LR is 0 then the Q value would not update. The higher the value the quicker the agent will adopt the NEW Q value. If lr = 1, the updated value would be exactly be the newly calculated q value, completely ignoring the previous one
epsilon = 1.0 # Starting Epsilon Rate, affects the exploration probability. Will decay
decayRate = 0.00001 # Rate at which epsilon will decay per step
epsilonMin = 0.1 # Minimum value at which epsilon will stop decaying
n_epochsBeforeDecay = 10 # number of games to be played before epsilon starts to decay
# --- Deep-Q model / replay-buffer parameters ---
numOfInputs = 7 # Number of inputs fed to the model
stateDepth = 1 # Number of old observations kept for current state. State will consist of s(t) ... s(t_n)
minReplayMemSize = 1_000 # min size determines when the replay will start being used
replayMemSize = 100_000 # Max size for the replay buffer
batchSize = 256 # Batch size for the model
updateRate = 5 # update target model every so many episodes
# --- Resume / reload switches ---
startingOffset = 0 # is used if previous Results are loaded.
loadModel = False # will load "model.h5" for tf if True (model.npy for non-Deep)
loadMemory = False # will load "memory.pickle" if True
loadResults = False # will load "results.npy" if True
jsbRender = False # will send UDP data to flight gear for rendering if True
jsbRealTime = False # will slow down the physics to portrait real time rendering
usePredefinedSeeds = False # Sets seeds for tf, np and random for more replicable results (not fully replicable due to stochastic environments)
saveResultsToPlot = True # Saves results to png in the experiment folder at runetime
saveForAutoReload = False # Saves and overrides models, results and memory to the root
# --- Scenario (flight) parameters ---
startingVelocity = 60
startingPitchRange = 10
startingRollRange = 15
randomDesiredState = True # Set a new state to stabalize towards every episode
desiredPitchRange = 5
desiredRollRange = 5
# Index of each observation component inside an observation vector.
dictObservation = {
    "lat": 0,
    "long": 1,
    "alt": 2,
    "pitch": 3,
    "roll": 4,
    "yaw": 5,
    "gear": 6}
# Action ids: pitch +/-, roll +/-, rudder +/-, no-op.
dictAction = {
    "pi+": 0,
    "pi-": 1,
    "ro+": 2,
    "ro-": 3,
    "ru+": 4,
    "ru-": 5,
    "no": 6}
# Last error seen per phase; reset after each log() call in epoch().
dictErrors = {
    "reset": 0,
    "update": 0,
    "step": 0}
# Index of each rotation/velocity component used by the environment.
dictRotation = {
    "roll": 0,
    "pitch": 1,
    "yaw": 2,
    "northVelo": 3,
    "eastVelo": 4,
    "verticalVelo": 5}
# -998->NO CHANGE
flightOrigin = [35.126, 126.809, 6000, 0, 0, 0, 1] # Gwangju SK
flightDestinaion = [33.508, 126.487, 6000, -998, -998, -998, 1] # Jeju SK
# Other locations to use: Memmingen: [47.988, 10.240], Chicago: [41.976, -87.902]
epochRewards = []  # total reward per finished episode
epochQs = []  # accumulated argmax-Q per finished episode
movingRate = savePeriod * movingRate # gives the number by which the moving average will be done, best if n * savePeriod
# Moving-average metric series, appended every movingRate episodes.
movingEpRewards = {
    "epoch": [],
    "average": [],
    "minimum": [],
    "maximum": [],
    "averageQ": [],
    "epsilon": []}
fallbackState = [0] * numOfInputs # Used in case of connection error to XPlane
fallbackState = [tuple(fallbackState)]
# Will load previous results in case a experiment needs to be continued
if(loadResults):
    movingEpRewards = np.load("results.npy", allow_pickle=True).item() # loads the file - .item() turns the loaded nparray back to a dict
    startingOffset = np.max(movingEpRewards["epoch"]) # loads the episode where it previously stopped
    epsilon = np.min(movingEpRewards["epsilon"]) # loads the epsilon where the previously experiment stopped
    n_epochsBeforeDecay = max(0, n_epochsBeforeDecay - startingOffset) # sets n_epochsBeforeDecay to the according value - max makes it so it's not negative but 0
if(usePredefinedSeeds):
    np.random.seed(42)
# Agent, scenario and environment construction (project classes imported above).
Q = QLearn(n_states, n_actions, gamma, lr, epsilon,
           decayRate, epsilonMin, n_epochsBeforeDecay, experimentName, saveForAutoReload, loadModel, usePredefinedSeeds,
           loadMemory, numOfInputs, minReplayMemSize, replayMemSize, batchSize, updateRate, stateDepth)
scene = Scene(dictObservation, dictAction, n_actions, stateDepth, startingVelocity, startingPitchRange, startingRollRange, usePredefinedSeeds, randomDesiredState, desiredPitchRange, desiredRollRange)
env = Env(scene, flightOrigin, flightDestinaion, n_actions, usePredefinedSeeds,
          dictObservation, dictAction, dictRotation, startingVelocity, pauseDelay, Q.id, jsbRender, jsbRealTime)
# saving setup pre run
if not os.path.exists("./Experiments/" + experimentName):
    os.makedirs("./Experiments/" + experimentName)
# f-string "=" specifier records both name and value, e.g. experimentName='...'
setup = f"{experimentName=}\n{Q.numGPUs=}\n{dateTime=}\nendTime=not yet defined - first save\n{Q.id=}\n{env.id=}\n{scene.id=}\n{pauseDelay=}\n{n_epochs=}\n"
setup += f"{n_steps=}\n{n_actions=}\n{n_states=} - states for non deep\n{gamma=}\n{lr=}\n{epsilon=}\n{decayRate=}\n{epsilonMin=}\n{n_epochsBeforeDecay=}\n"
setup += f"{numOfInputs=} - states for deep\n{minReplayMemSize=}\n{replayMemSize=}\n{batchSize=}\n{updateRate=}\n{loadModel=}\n{movingRate=}\n"
setup += f"{randomDesiredState=}\n{desiredRollRange=}\n{desiredPitchRange=}\n{startingRollRange=}\n{startingPitchRange=}\n{startingVelocity=}\n{stateDepth=}\n{Q.modelSummary=}\n{notes=}\n"
print(setup, file=open("./Experiments/" + str(experimentName) + "/setup.out", 'w')) # saves hyperparameters to the experiment folder
# prints out all metrics
def log(i_epoch, i_step, reward, logList):
    """Pretty-print the current training metrics to the console.

    ``logList`` layout (as built in step()):
    [old_state, new_state, action, actions_binary, observation, control,
     explore, currentEpsilon] -- note index 2 (the raw action) is not read here.
    """
    global timeStart # Used to print time elapsed between log calls
    global timeEnd # Used to print time elapsed between log calls
    old_state = logList[0]
    new_state = logList[1]
    actions_binary = logList[3]
    observation = logList[4]
    control = logList[5]
    explore = logList[6]
    currentEpsilon = logList[7]
    # Deep agents keep a history of stacked states; show only the newest one.
    if(Q.id == "deep" or Q.id == "doubleDeep"):
        depth = len(old_state)
        depth = "Depth " + str(depth)
        old_state = old_state[-1]
        new_state = new_state[-1]
    else:
        depth = ""
    timeEnd = time.time() # End timer here
    print("\t\tGame ", i_epoch,
          "\n\t\t\tMove ", i_step,
          "\n\t\t\tStarting Rotation ", np.array(env.startingOrientation).round(logDecimals),
          "\n\t\t\tDestination Rotation ", env.desiredState,
          "\n\t\t\tTime taken ", timeEnd - timeStart,
          "\n\t\t\tOld State ", np.array(old_state).round(logDecimals), depth,
          "\n\t\t\tNew State ", np.array(new_state).round(logDecimals), depth,
          "\n\t\t\t\t\t[p+,p-,r+,r-]",
          "\n\t\t\tactions_binary = ", actions_binary,
          "\n\t\t\tCurrent Control:", control,
          "\n\t\t\tCurrent Qs:", Q.currentTable,
          "\n\t\t\tCurrent Orientation: ", np.array(observation[dictObservation["pitch"]:dictObservation["gear"]]).round(logDecimals),
          "\n\t\t\tCurrent AVE of QTable: ", np.average(Q.qTable),
          "\n\t\t\tExplored (Random): ", explore,
          "\n\t\t\tCurrent Epsilon: ", currentEpsilon,
          "\n\t\t\tCurrent Reward: ", reward,
          # percentages are relative to the total number of steps taken so far
          "\n\t\t\tReconnects Percentage & Count: ", float(connectAttempts / (i_epoch * n_steps + i_step + 1)), ",", connectAttempts,
          "\n\t\t\tError Percentage & Count: ", float(errors / (i_epoch * n_steps + i_step + 1)), ",", errors,
          "\n\t\t\tError Code: ", dictErrors, "\n")
    timeStart = time.time() # Start timer here
# A single step(input), this will repeat n_steps times throughout a epoch
def step(i_step, done, reward, oldState):
    """Select one action, apply it to the environment and learn from it.

    Returns (done, reward, logList, oldState) for the next iteration.
    NOTE(review): reads ``i_epoch`` from the module-level training loop, so it
    must only be called from within that loop.
    """
    global errors
    global connectAttempts
    if(Q.id == "deep" or Q.id == "doubleDeep"):
        oldState = list(oldState)
    action, explore, currentEpsilon = Q.selectAction(oldState, i_epoch, n_epochs)
    # Check if connections can be established 10x
    for attempt in range(10):
        try:
            newState, reward, done, info = env.step(action)
            if(i_step == n_steps):
                done = True # mark done if episode is finished
        except socket.error as socketError: # the specific error for connections used by xpc
            dictErrors["step"] = socketError
            connectAttempts += 1
            continue
        else:
            break
    else: # if all 10 attempts fail
        # fall back to a neutral transition so training can continue
        errors += 1
        if(Q.id == "deep" or Q.id == "doubleDeep"):
            newState = fallbackState
        else:
            newState = 0
        reward = 0
        done = False
        info = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], 0]
        pass # Error was in second loop
    # info layout: [position/observation, binary action vector, control surface]
    newPosition = info[0]
    actions_binary = info[1]
    control = info[2]
    # checking if state includes a NaN (happens in JSBSim sometimes)
    if(np.isnan(newState).any()):
        if(Q.id == "deep" or Q.id == "doubleDeep"):
            newState = fallbackState
        else:
            newState = 0
        reward = 0
        info = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], 0]
        dictErrors["step"] = "NaN in state"
        errors += 1
        done = True  # abort the episode on a corrupted state
    Q.learn(oldState, action, reward, newState, done)
    logList = [oldState, newState, action, actions_binary, newPosition, control, explore, currentEpsilon]
    if(Q.id == "deep" or Q.id == "doubleDeep"):
        oldState = list(newState)
    else:
        oldState = newState
    return done, reward, logList, oldState
# A epoch is one full run, from respawn/reset to the final step.
def epoch(i_epoch):
    """Play one full episode: reset the env, run up to n_steps steps,
    record the episode reward/Q metrics and the moving averages."""
    global errors
    global connectAttempts
    epochReward = 0
    epochQ = 0
    # environment reset with up to 25 connection retries
    for attempt in range(25):
        try:
            oldState = env.reset()
        except socket.error as socketError: # the specific error for connections used by xpc
            dictErrors["reset"] = socketError
            connectAttempts += 1
            continue
        else:
            break
    else: # if all 25 attempts fail
        if(Q.id == "deep" or Q.id == "doubleDeep"):
            oldState = fallbackState # Error was during reset
        else:
            oldState = 0
        errors += 1
    # periodically persist the table/model
    if(i_epoch % savePeriod == 0):
        Q.archive(i_epoch)
    done = False
    reward = 0
    for i_step in range(n_steps + 1):
        done, reward, logList, oldState = step(i_step, done, reward, oldState)
        epochReward += reward
        epochQ += np.argmax(Q.currentTable)
        if(i_step % logPeriod == 0): # log every logPeriod steps
            log(i_epoch, i_step, reward, logList)
            dictErrors["reset"], dictErrors["update"], dictErrors["step"] = [0, 0, 0]
        if done:
            break
    epochRewards.append(epochReward)
    epochQs.append(epochQ)
    # every movingRate episodes, roll up min/max/average metrics for plotting
    if(i_epoch % movingRate == 0 and i_epoch != 0):
        movingEpRewards["epoch"].append(i_epoch)
        averageReward = sum(epochRewards[-movingRate:]) / len(epochRewards[-movingRate:])
        movingEpRewards["average"].append(averageReward)
        movingEpRewards["minimum"].append(min(epochRewards[-movingRate:]))
        movingEpRewards["maximum"].append(max(epochRewards[-movingRate:]))
        averageQ = sum(epochQs[-movingRate:]) / len(epochQs[-movingRate:])
        movingEpRewards["averageQ"].append(averageQ)
        movingEpRewards["epsilon"].append(logList[7])
# Main training loop; startingOffset is non-zero when resuming a previous run.
for i_epoch in range(startingOffset, startingOffset + n_epochs + 1):
    epoch(i_epoch)
    if(i_epoch % savePeriod == 0):
        np.save("./Experiments/" + str(experimentName) + "/results" + str(i_epoch) + ".npy", movingEpRewards)
        if(saveForAutoReload):
            np.save("results.npy", movingEpRewards)
    # plot the accumulated moving-average metrics at runtime
    if(saveResultsToPlot and i_epoch % movingRate == 0):
        plt.plot(movingEpRewards['epoch'], movingEpRewards['average'], label="average rewards")
        plt.plot(movingEpRewards['epoch'], movingEpRewards['averageQ'], label="average Qs")
        plt.plot(movingEpRewards['epoch'], movingEpRewards['maximum'], label="max rewards")
        plt.plot(movingEpRewards['epoch'], movingEpRewards['minimum'], label="min rewards")
        plt.plot(movingEpRewards['epoch'], movingEpRewards['epsilon'], label="epsilon")
        plt.title("Results")
        plt.xlabel("episodes")
        plt.ylabel("reward")
        plt.legend(loc=4)
        plt.savefig("./Experiments/" + str(experimentName) + "/plot" + str(i_epoch) + ".png")
        plt.clf()
np.save("./Experiments/" + str(experimentName) + "/results_final.npy", movingEpRewards)
endTime = str(time.ctime(time.time()))
# saving setup post run
setup = f"{experimentName=}\n{Q.numGPUs=}\n{dateTime=}\n{endTime=}\n{Q.id=}\n{env.id=}\n{scene.id=}\n{pauseDelay=}\n{n_epochs=}\n"
setup += f"{n_steps=}\n{n_actions=}\n{n_states=} - states for non deep\n{gamma=}\n{lr=}\n{epsilon=}\n{decayRate=}\n{epsilonMin=}\n{n_epochsBeforeDecay=}\n"
setup += f"{numOfInputs=} - states for deep\n{minReplayMemSize=}\n{replayMemSize=}\n{batchSize=}\n{updateRate=}\n{loadModel=}\n{movingRate=}\n"
setup += f"{randomDesiredState=}\n{desiredRollRange=}\n{desiredPitchRange=}\n{startingRollRange=}\n{startingPitchRange=}\n{startingVelocity=}\n{stateDepth=}\n{Q.modelSummary=}\n{notes=}\n"
print(setup, file=open("./Experiments/" + str(experimentName) + "/setup.out", 'w')) # saves hyperparameters to the experiment folder
print("<<<<<<<<<<<<<<<<<<<<DONE>>>>>>>>>>>>>>>>>>>>>")
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.save",
"src.algorithms.QDoubleDeepLearn.QLearn",
"os.path.exists",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"src.environments.jsbsim.JSBSimEnv.Env",
"numpy.average",
"numpy.argmax",
... | [((955, 966), 'time.time', 'time.time', ([], {}), '()\n', (964, 966), False, 'import time\n'), ((1001, 1012), 'time.time', 'time.time', ([], {}), '()\n', (1010, 1012), False, 'import time\n'), ((1456, 1498), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': 'logDecimals'}), '(precision=logDecimals)\n', (1475, 1498), True, 'import numpy as np\n'), ((5594, 5856), 'src.algorithms.QDoubleDeepLearn.QLearn', 'QLearn', (['n_states', 'n_actions', 'gamma', 'lr', 'epsilon', 'decayRate', 'epsilonMin', 'n_epochsBeforeDecay', 'experimentName', 'saveForAutoReload', 'loadModel', 'usePredefinedSeeds', 'loadMemory', 'numOfInputs', 'minReplayMemSize', 'replayMemSize', 'batchSize', 'updateRate', 'stateDepth'], {}), '(n_states, n_actions, gamma, lr, epsilon, decayRate, epsilonMin,\n n_epochsBeforeDecay, experimentName, saveForAutoReload, loadModel,\n usePredefinedSeeds, loadMemory, numOfInputs, minReplayMemSize,\n replayMemSize, batchSize, updateRate, stateDepth)\n', (5600, 5856), False, 'from src.algorithms.QDoubleDeepLearn import QLearn\n'), ((5876, 6075), 'src.scenarios.deltaAttitudeControlScene.Scene', 'Scene', (['dictObservation', 'dictAction', 'n_actions', 'stateDepth', 'startingVelocity', 'startingPitchRange', 'startingRollRange', 'usePredefinedSeeds', 'randomDesiredState', 'desiredPitchRange', 'desiredRollRange'], {}), '(dictObservation, dictAction, n_actions, stateDepth, startingVelocity,\n startingPitchRange, startingRollRange, usePredefinedSeeds,\n randomDesiredState, desiredPitchRange, desiredRollRange)\n', (5881, 6075), False, 'from src.scenarios.deltaAttitudeControlScene import Scene\n'), ((6075, 6259), 'src.environments.jsbsim.JSBSimEnv.Env', 'Env', (['scene', 'flightOrigin', 'flightDestinaion', 'n_actions', 'usePredefinedSeeds', 'dictObservation', 'dictAction', 'dictRotation', 'startingVelocity', 'pauseDelay', 'Q.id', 'jsbRender', 'jsbRealTime'], {}), '(scene, flightOrigin, flightDestinaion, n_actions, usePredefinedSeeds,\n dictObservation, 
dictAction, dictRotation, startingVelocity, pauseDelay,\n Q.id, jsbRender, jsbRealTime)\n', (6078, 6259), False, 'from src.environments.jsbsim.JSBSimEnv import Env\n'), ((5185, 5217), 'numpy.max', 'np.max', (["movingEpRewards['epoch']"], {}), "(movingEpRewards['epoch'])\n", (5191, 5217), True, 'import numpy as np\n'), ((5281, 5315), 'numpy.min', 'np.min', (["movingEpRewards['epsilon']"], {}), "(movingEpRewards['epsilon'])\n", (5287, 5315), True, 'import numpy as np\n'), ((5570, 5588), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5584, 5588), True, 'import numpy as np\n'), ((6293, 6342), 'os.path.exists', 'os.path.exists', (["('./Experiments/' + experimentName)"], {}), "('./Experiments/' + experimentName)\n", (6307, 6342), False, 'import os\n'), ((6348, 6394), 'os.makedirs', 'os.makedirs', (["('./Experiments/' + experimentName)"], {}), "('./Experiments/' + experimentName)\n", (6359, 6394), False, 'import os\n'), ((7830, 7841), 'time.time', 'time.time', ([], {}), '()\n', (7839, 7841), False, 'import time\n'), ((9150, 9161), 'time.time', 'time.time', ([], {}), '()\n', (9159, 9161), False, 'import time\n'), ((728, 739), 'time.time', 'time.time', ([], {}), '()\n', (737, 739), False, 'import time\n'), ((8664, 8684), 'numpy.average', 'np.average', (['Q.qTable'], {}), '(Q.qTable)\n', (8674, 8684), True, 'import numpy as np\n'), ((12055, 12080), 'numpy.argmax', 'np.argmax', (['Q.currentTable'], {}), '(Q.currentTable)\n', (12064, 12080), True, 'import numpy as np\n'), ((14186, 14197), 'time.time', 'time.time', ([], {}), '()\n', (14195, 14197), False, 'import time\n'), ((5047, 5088), 'numpy.load', 'np.load', (['"""results.npy"""'], {'allow_pickle': '(True)'}), "('results.npy', allow_pickle=True)\n", (5054, 5088), True, 'import numpy as np\n'), ((10468, 10486), 'numpy.isnan', 'np.isnan', (['newState'], {}), '(newState)\n', (10476, 10486), True, 'import numpy as np\n'), ((13239, 13278), 'numpy.save', 'np.save', (['"""results.npy"""', 'movingEpRewards'], 
{}), "('results.npy', movingEpRewards)\n", (13246, 13278), True, 'import numpy as np\n'), ((13352, 13444), 'matplotlib.pyplot.plot', 'plt.plot', (["movingEpRewards['epoch']", "movingEpRewards['average']"], {'label': '"""average rewards"""'}), "(movingEpRewards['epoch'], movingEpRewards['average'], label=\n 'average rewards')\n", (13360, 13444), True, 'import matplotlib.pyplot as plt\n'), ((13452, 13540), 'matplotlib.pyplot.plot', 'plt.plot', (["movingEpRewards['epoch']", "movingEpRewards['averageQ']"], {'label': '"""average Qs"""'}), "(movingEpRewards['epoch'], movingEpRewards['averageQ'], label=\n 'average Qs')\n", (13460, 13540), True, 'import matplotlib.pyplot as plt\n'), ((13548, 13636), 'matplotlib.pyplot.plot', 'plt.plot', (["movingEpRewards['epoch']", "movingEpRewards['maximum']"], {'label': '"""max rewards"""'}), "(movingEpRewards['epoch'], movingEpRewards['maximum'], label=\n 'max rewards')\n", (13556, 13636), True, 'import matplotlib.pyplot as plt\n'), ((13644, 13732), 'matplotlib.pyplot.plot', 'plt.plot', (["movingEpRewards['epoch']", "movingEpRewards['minimum']"], {'label': '"""min rewards"""'}), "(movingEpRewards['epoch'], movingEpRewards['minimum'], label=\n 'min rewards')\n", (13652, 13732), True, 'import matplotlib.pyplot as plt\n'), ((13740, 13819), 'matplotlib.pyplot.plot', 'plt.plot', (["movingEpRewards['epoch']", "movingEpRewards['epsilon']"], {'label': '"""epsilon"""'}), "(movingEpRewards['epoch'], movingEpRewards['epsilon'], label='epsilon')\n", (13748, 13819), True, 'import matplotlib.pyplot as plt\n'), ((13832, 13852), 'matplotlib.pyplot.title', 'plt.title', (['"""Results"""'], {}), "('Results')\n", (13841, 13852), True, 'import matplotlib.pyplot as plt\n'), ((13865, 13887), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""episodes"""'], {}), "('episodes')\n", (13875, 13887), True, 'import matplotlib.pyplot as plt\n'), ((13900, 13920), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reward"""'], {}), "('reward')\n", (13910, 13920), True, 
'import matplotlib.pyplot as plt\n'), ((13933, 13950), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (13943, 13950), True, 'import matplotlib.pyplot as plt\n'), ((14061, 14070), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (14068, 14070), True, 'import matplotlib.pyplot as plt\n'), ((7967, 8000), 'numpy.array', 'np.array', (['env.startingOrientation'], {}), '(env.startingOrientation)\n', (7975, 8000), True, 'import numpy as np\n'), ((8168, 8187), 'numpy.array', 'np.array', (['old_state'], {}), '(old_state)\n', (8176, 8187), True, 'import numpy as np\n'), ((8247, 8266), 'numpy.array', 'np.array', (['new_state'], {}), '(new_state)\n', (8255, 8266), True, 'import numpy as np\n'), ((8527, 8598), 'numpy.array', 'np.array', (["observation[dictObservation['pitch']:dictObservation['gear']]"], {}), "(observation[dictObservation['pitch']:dictObservation['gear']])\n", (8535, 8598), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from agents import TabularBasicAgent, capacities
class TabularMCAgent(TabularBasicAgent):
"""
Agent implementing tabular Q-learning.
"""
def set_agent_props(self):
self.discount = self.config['discount']
self.N0 = self.config['N0']
self.min_eps = self.config['min_eps']
self.initial_q_value = self.config['initial_q_value']
def get_best_config(self, env_name=""):
cartpolev0 = {
'discount': .99
, 'N0': 10
, 'min_eps': 0.001
, 'initial_q_value': 0
}
mountaincarv0 = {
'discount': 0.99
, 'N0': 10
, 'min_eps': 0.001
, 'initial_q_value': 0 # This is an optimistic initialization
}
acrobotv1 = {
"discount": 0.999
, "initial_q_value": 0 # This is an optimistic initialization
, "N0": 100
, "min_eps": 0.11409578938939571
}
return {
'CartPole-v0': cartpolev0
, 'MountainCar-v0': mountaincarv0
, 'Acrobot-v1': acrobotv1
}.get(env_name, cartpolev0)
@staticmethod
def get_random_config(fixed_params={}):
get_discount = lambda: 0.98 + (1 - 0.98) * np.random.random(1)[0]
get_N0 = lambda: np.random.randint(1, 1e3)
get_min_eps = lambda: 1e-4 + (2e-1 - 1e-4) * np.random.random(1)[0]
get_initial_q_value = lambda: 0
random_config = {
'discount': get_discount()
, 'N0': get_N0()
, 'min_eps': get_min_eps()
, 'initial_q_value': get_initial_q_value()
}
random_config.update(fixed_params)
return random_config
def build_graph(self, graph):
with graph.as_default():
tf.set_random_seed(self.random_seed)
self.inputs_plh = tf.placeholder(tf.int32, shape=[None], name="inputs_plh")
q_scope = tf.VariableScope(reuse=False, name='QValues')
with tf.variable_scope(q_scope):
self.Qs = tf.get_variable('Qs'
, shape=[self.nb_state, self.action_space.n]
, initializer=tf.constant_initializer(self.initial_q_value)
, dtype=tf.float32
)
tf.summary.histogram('Qarray', self.Qs)
self.q_preds_t = tf.gather(self.Qs, self.inputs_plh)
policy_scope = tf.VariableScope(reuse=False, name='Policy')
with tf.variable_scope(policy_scope):
if 'UCB' in self.config and self.config['UCB']:
self.actions_t, self.probs_t = capacities.tabular_UCB(
self.Qs, self.inputs_plh
)
else:
self.actions_t, self.probs_t = capacities.tabular_eps_greedy(
self.inputs_plh, self.q_preds_t, self.nb_state, self.env.action_space.n, self.N0, self.min_eps
)
self.action_t = self.actions_t[0]
self.q_value_t = self.q_preds_t[0][self.action_t]
learning_scope = tf.VariableScope(reuse=False, name='Learning')
with tf.variable_scope(learning_scope):
self.rewards_plh = tf.placeholder(tf.float32, shape=[None], name="rewards_plh")
self.targets_t = capacities.get_mc_target(self.rewards_plh, self.discount)
self.loss, self.train_op = capacities.tabular_learning(
self.Qs, self.inputs_plh, self.actions_t, self.targets_t
)
self.score_plh = tf.placeholder(tf.float32, shape=[])
self.score_sum_t = tf.summary.scalar('score', self.score_plh)
self.loss_plh = tf.placeholder(tf.float32, shape=[])
self.loss_sum_t = tf.summary.scalar('loss', self.loss_plh)
self.all_summary_t = tf.summary.merge_all()
self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")
# Playing part
self.pscore_plh = tf.placeholder(tf.float32, shape=[])
self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)
return graph
def act(self, obs, done=False):
state_id = self.phi(obs, done)
act = self.sess.run(self.action_t, feed_dict={
self.inputs_plh: [ state_id ]
})
return act, state_id
def learn_from_episode(self, env, render=False):
# Play one full episode with the current policy, then apply a single
# Monte-Carlo update over the whole trajectory.
score = 0
# Structured record type: one row per step (state id, action, reward).
episodeType = np.dtype([('states', 'int32'), ('actions', 'int32'), ('rewards', 'float32')])
episode = np.array([], dtype=episodeType)
done = False
obs = env.reset()
while not done:
if render:
env.render()
act, state_id= self.act(obs)
obs, reward, done, info = env.step(act)
# Append this transition to the episode trace.
memory = np.array([(state_id, act, reward)], dtype=episodeType)
episode = np.append(episode, memory)
score += reward
# Single tabular update over the complete episode — the MC target
# needs the full reward sequence before it can be computed.
_, loss = self.sess.run([self.train_op, self.loss], feed_dict={
self.inputs_plh: episode['states'],
self.actions_t: episode['actions'],
self.rewards_plh: episode['rewards'],
})
# Evaluate summaries and bump the persistent episode counter.
summary, _, episode_id = self.sess.run([self.all_summary_t, self.inc_ep_id_op, self.episode_id], feed_dict={
self.score_plh: score,
self.loss_plh: loss
})
self.sw.add_summary(summary, episode_id) | [
"agents.capacities.tabular_learning",
"numpy.array",
"tensorflow.VariableScope",
"tensorflow.set_random_seed",
"numpy.random.random",
"tensorflow.placeholder",
"agents.capacities.get_mc_target",
"tensorflow.summary.scalar",
"agents.capacities.tabular_UCB",
"numpy.dtype",
"tensorflow.summary.merg... | [((4573, 4650), 'numpy.dtype', 'np.dtype', (["[('states', 'int32'), ('actions', 'int32'), ('rewards', 'float32')]"], {}), "([('states', 'int32'), ('actions', 'int32'), ('rewards', 'float32')])\n", (4581, 4650), True, 'import numpy as np\n'), ((4669, 4700), 'numpy.array', 'np.array', (['[]'], {'dtype': 'episodeType'}), '([], dtype=episodeType)\n', (4677, 4700), True, 'import numpy as np\n'), ((1349, 1377), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000.0)'], {}), '(1, 1000.0)\n', (1366, 1377), True, 'import numpy as np\n'), ((1843, 1879), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['self.random_seed'], {}), '(self.random_seed)\n', (1861, 1879), True, 'import tensorflow as tf\n'), ((1911, 1968), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""inputs_plh"""'}), "(tf.int32, shape=[None], name='inputs_plh')\n", (1925, 1968), True, 'import tensorflow as tf\n'), ((2004, 2049), 'tensorflow.VariableScope', 'tf.VariableScope', ([], {'reuse': '(False)', 'name': '"""QValues"""'}), "(reuse=False, name='QValues')\n", (2020, 2049), True, 'import tensorflow as tf\n'), ((2497, 2541), 'tensorflow.VariableScope', 'tf.VariableScope', ([], {'reuse': '(False)', 'name': '"""Policy"""'}), "(reuse=False, name='Policy')\n", (2513, 2541), True, 'import tensorflow as tf\n'), ((3197, 3243), 'tensorflow.VariableScope', 'tf.VariableScope', ([], {'reuse': '(False)', 'name': '"""Learning"""'}), "(reuse=False, name='Learning')\n", (3213, 3243), True, 'import tensorflow as tf\n'), ((3681, 3717), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (3695, 3717), True, 'import tensorflow as tf\n'), ((3749, 3791), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""score"""', 'self.score_plh'], {}), "('score', self.score_plh)\n", (3766, 3791), True, 'import tensorflow as tf\n'), ((3820, 3856), 'tensorflow.placeholder', 'tf.placeholder', 
(['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (3834, 3856), True, 'import tensorflow as tf\n'), ((3887, 3927), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss_plh'], {}), "('loss', self.loss_plh)\n", (3904, 3927), True, 'import tensorflow as tf\n'), ((3961, 3983), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3981, 3983), True, 'import tensorflow as tf\n'), ((4034, 4066), 'agents.capacities.counter', 'capacities.counter', (['"""episode_id"""'], {}), "('episode_id')\n", (4052, 4066), False, 'from agents import TabularBasicAgent, capacities\n'), ((4125, 4161), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (4139, 4161), True, 'import tensorflow as tf\n'), ((4194, 4242), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""play_score"""', 'self.pscore_plh'], {}), "('play_score', self.pscore_plh)\n", (4211, 4242), True, 'import tensorflow as tf\n'), ((4941, 4995), 'numpy.array', 'np.array', (['[(state_id, act, reward)]'], {'dtype': 'episodeType'}), '([(state_id, act, reward)], dtype=episodeType)\n', (4949, 4995), True, 'import numpy as np\n'), ((5018, 5044), 'numpy.append', 'np.append', (['episode', 'memory'], {}), '(episode, memory)\n', (5027, 5044), True, 'import numpy as np\n'), ((2067, 2093), 'tensorflow.variable_scope', 'tf.variable_scope', (['q_scope'], {}), '(q_scope)\n', (2084, 2093), True, 'import tensorflow as tf\n'), ((2360, 2399), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""Qarray"""', 'self.Qs'], {}), "('Qarray', self.Qs)\n", (2380, 2399), True, 'import tensorflow as tf\n'), ((2433, 2468), 'tensorflow.gather', 'tf.gather', (['self.Qs', 'self.inputs_plh'], {}), '(self.Qs, self.inputs_plh)\n', (2442, 2468), True, 'import tensorflow as tf\n'), ((2559, 2590), 'tensorflow.variable_scope', 'tf.variable_scope', (['policy_scope'], {}), '(policy_scope)\n', (2576, 2590), True, 'import tensorflow as tf\n'), 
((3261, 3294), 'tensorflow.variable_scope', 'tf.variable_scope', (['learning_scope'], {}), '(learning_scope)\n', (3278, 3294), True, 'import tensorflow as tf\n'), ((3331, 3391), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""rewards_plh"""'}), "(tf.float32, shape=[None], name='rewards_plh')\n", (3345, 3391), True, 'import tensorflow as tf\n'), ((3426, 3483), 'agents.capacities.get_mc_target', 'capacities.get_mc_target', (['self.rewards_plh', 'self.discount'], {}), '(self.rewards_plh, self.discount)\n', (3450, 3483), False, 'from agents import TabularBasicAgent, capacities\n'), ((3527, 3617), 'agents.capacities.tabular_learning', 'capacities.tabular_learning', (['self.Qs', 'self.inputs_plh', 'self.actions_t', 'self.targets_t'], {}), '(self.Qs, self.inputs_plh, self.actions_t, self.\n targets_t)\n', (3554, 3617), False, 'from agents import TabularBasicAgent, capacities\n'), ((2707, 2755), 'agents.capacities.tabular_UCB', 'capacities.tabular_UCB', (['self.Qs', 'self.inputs_plh'], {}), '(self.Qs, self.inputs_plh)\n', (2729, 2755), False, 'from agents import TabularBasicAgent, capacities\n'), ((2879, 3009), 'agents.capacities.tabular_eps_greedy', 'capacities.tabular_eps_greedy', (['self.inputs_plh', 'self.q_preds_t', 'self.nb_state', 'self.env.action_space.n', 'self.N0', 'self.min_eps'], {}), '(self.inputs_plh, self.q_preds_t, self.\n nb_state, self.env.action_space.n, self.N0, self.min_eps)\n', (2908, 3009), False, 'from agents import TabularBasicAgent, capacities\n'), ((1301, 1320), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1317, 1320), True, 'import numpy as np\n'), ((1428, 1447), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1444, 1447), True, 'import numpy as np\n'), ((2241, 2286), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.initial_q_value'], {}), '(self.initial_q_value)\n', (2264, 2286), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.