code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Modified code from https://github.com/nwojke/deep_sort
"""
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from . import linear_assignment
from .track import Track
class Tracker:
    """
    This is the multi-target tracker.
    Parameters
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        A distance metric for measurement-to-track association.
    max_age : int
        Maximum number of missed misses before a track is deleted.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.
    Attributes
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        The distance metric used for measurement to track association.
    max_age : int
        Maximum number of missed misses before a track is deleted.
    n_init : int
        Number of frames that a track remains in initialization phase.
    kf : kalman_filter.KalmanFilter
        A Kalman filter to filter target trajectories in image space.
    tracks : List[Track]
        The list of active tracks at the current time step.
    """
    def __init__(self, opt, metric, max_age=30, n_init=3, phalp_tracker=None, dims=None):
        # opt: run-time options object; this class reads opt.verbose,
        # opt.predict and opt.use_gt.
        self.opt = opt
        self.metric = metric
        self.max_age = max_age
        self.n_init = n_init
        self.tracks = []
        self._next_id = 1
        # Per-frame association statistics, keyed by frame id (filled in update()).
        self.tracked_cost = {}
        self.phalp_tracker = phalp_tracker
        # dims holds the (appearance, pose, location) feature dimensions;
        # A_dim/P_dim/L_dim only exist when dims is passed, and are read
        # later by _match() and _initiate_track().
        if(dims is not None):
            self.A_dim = dims[0]
            self.P_dim = dims[1]
            self.L_dim = dims[2]
    def predict(self):
        """Propagate track state distributions one time step forward.
        This function should be called once every time step, before `update`.
        """
        for track in self.tracks:
            track.predict(self.phalp_tracker, increase_age=True)
    def update(self, detections, frame_t, image_name, shot):
        """Perform measurement update and track management.
        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            A list of detections at the current time step.
        frame_t : hashable
            Frame identifier; used as the key into ``self.tracked_cost``.
        image_name :
            Unused here; kept for interface compatibility with callers.
        shot :
            Forwarded unchanged to ``Track.update``.
        Returns
        -------
        matches : list
            The (track_idx, detection_idx) pairs produced by ``_match``.
        """
        # Run matching cascade.
        matches, unmatched_tracks, unmatched_detections, statistics = self._match(detections)
        # statistics is [cost_matrix, track_gt, detect_gt, track_idt, detect_idt]
        # (see _match); record everything for this frame.
        self.tracked_cost[frame_t] = [statistics[0], matches, unmatched_tracks, unmatched_detections, statistics[1], statistics[2], statistics[3], statistics[4]]
        if(self.opt.verbose): print(np.array(statistics[0]).astype(int))
        # Update matched tracks with their assigned detections.
        for track_idx, detection_idx in matches:
            self.tracks[track_idx].update(detections[detection_idx], detection_idx, shot)
        self.accumulate_vectors([i[0] for i in matches], features=self.opt.predict)
        # Age out tracks that received no detection this frame.
        for track_idx in unmatched_tracks:
            self.tracks[track_idx].mark_missed()
        self.accumulate_vectors(unmatched_tracks, features=self.opt.predict)
        # Start new tracks from unmatched detections.
        for detection_idx in unmatched_detections:
            self._initiate_track(detections[detection_idx], detection_idx)
        self.tracks = [t for t in self.tracks if not t.is_deleted()]
        # Refresh the distance metric with features of all live
        # (confirmed or tentative) tracks.
        active_targets = [t.track_id for t in self.tracks if t.is_confirmed() or t.is_tentative()]
        features, uv_maps, targets = [], [], []
        for track in self.tracks:
            if not (track.is_confirmed() or track.is_tentative()): continue
            features += [track.phalp_features]
            uv_maps += [track.phalp_uv_predicted]
            targets += [track.track_id]
        self.metric.partial_fit(np.asarray(features), np.asarray(uv_maps), np.asarray(targets), active_targets)
        return matches
    def _match(self, detections):
        """Associate detections with tracks.
        Returns ``(matches, unmatched_tracks, unmatched_detections,
        [cost_matrix, track_gt, detect_gt, track_idt, detect_idt])``.
        When ``opt.use_gt`` is set, the metric-based assignment is replaced
        by an oracle built from ground-truth identities.
        """
        def gated_metric(tracks, dets, track_indices, detection_indices):
            # Cost between the selected tracks and the detections'
            # appearance features and uv maps.
            features = np.array([dets[i].feature for i in detection_indices])
            uv_maps = np.array([dets[i].uv_map for i in detection_indices])
            targets = np.array([tracks[i].track_id for i in track_indices])
            cost_matrix = self.metric.distance([features, uv_maps], targets, dims=[self.A_dim, self.P_dim, self.L_dim], phalp_tracker=self.phalp_tracker)
            return cost_matrix
        # Split track set into confirmed and unconfirmed tracks.
        confirmed_tracks = [i for i, t in enumerate(self.tracks) if t.is_confirmed() or t.is_tentative()]
        # Associate confirmed tracks using appearance features.
        matches, unmatched_tracks, unmatched_detections, cost_matrix = linear_assignment.matching_simple( gated_metric, self.metric.matching_threshold, self.max_age, self.tracks, detections, confirmed_tracks)
        track_gt = [t.detection_data[-1]['ground_truth'] for i, t in enumerate(self.tracks) if t.is_confirmed() or t.is_tentative()]
        detect_gt = [d.detection_data['ground_truth'] for i, d in enumerate(detections)]
        track_idt = [i for i, t in enumerate(self.tracks) if t.is_confirmed() or t.is_tentative()]
        detect_idt = [i for i, d in enumerate(detections)]
        if(self.opt.use_gt):
            # Oracle mode: rebuild matches purely from ground-truth ids,
            # discarding the metric-based assignment above.
            matches = []
            for t_, t_gt in enumerate(track_gt):
                for d_, d_gt in enumerate(detect_gt):
                    if(t_gt==d_gt): matches.append([t_, d_])
            t_pool = [t_ for (t_, _) in matches]
            d_pool = [d_ for (_, d_) in matches]
            unmatched_tracks = [t_ for t_ in track_idt if t_ not in t_pool]
            unmatched_detections = [d_ for d_ in detect_idt if d_ not in d_pool]
            return matches, unmatched_tracks, unmatched_detections, [cost_matrix, track_gt, detect_gt, track_idt, detect_idt]
        return matches, unmatched_tracks, unmatched_detections, [cost_matrix, track_gt, detect_gt, track_idt, detect_idt]
    def _initiate_track(self, detection, detection_id):
        """Create and register a new Track from an unmatched detection."""
        new_track = Track(self.opt, self._next_id, self.n_init, self.max_age,
                          feature=detection.feature,
                          uv_map=detection.uv_map,
                          bbox=detection.tlwh,
                          detection_data=detection.detection_data,
                          confidence=[detection.confidence_c],
                          detection_id=detection_id,
                          dims=[self.A_dim, self.P_dim, self.L_dim],
                          time=detection.time)
        new_track.add_predicted()
        self.tracks.append(new_track)
        self._next_id += 1
    def accumulate_vectors(self, track_ids, features="APL"):
        """Run the predictors over the given tracks and store their outputs.
        `features` selects which heads run: "P" for pose, "L" for location.
        Time features and time-since-update are always collected.
        """
        a_features = []; p_features = []; l_features = []; t_features = []; l_time = []; confidence = []; is_tracks = 0; p_data = []
        for track_idx in track_ids:
            t_features.append(self.tracks[track_idx].phalp_time_features)
            l_time.append(self.tracks[track_idx].time_since_update)
            if("L" in features): l_features.append(self.tracks[track_idx].phalp_loca_features)
            if("P" in features): p_features.append(self.tracks[track_idx].phalp_pose_features)
            if("P" in features): t_id = self.tracks[track_idx].track_id; p_data.append([[data['xy'][0], data['xy'][1], data['scale'], data['scale'], data['time'], t_id] for data in self.tracks[track_idx].detection_data])
            if("L" in features): confidence.append(self.tracks[track_idx].confidence_c)
            is_tracks = 1
        l_time = np.array(l_time)
        t_features = np.array(t_features)
        if("P" in features): p_features = np.array(p_features)
        if("P" in features): p_data = np.array(p_data)
        if("L" in features): l_features = np.array(l_features)
        if("L" in features): confidence = np.array(confidence)
        if(is_tracks):
            # Inference only; no gradients are needed.
            with torch.no_grad():
                if("P" in features): p_pred = self.phalp_tracker.forward_for_tracking([p_features, p_data, t_features], "P", l_time)
                if("L" in features): l_pred = self.phalp_tracker.forward_for_tracking([l_features, t_features, confidence], "L", l_time)
                for p_id, track_idx in enumerate(track_ids):
                    self.tracks[track_idx].add_predicted(pose=p_pred[p_id] if("P" in features) else None,
                                                         loca=l_pred[p_id] if("L" in features) else None)
| [
"numpy.asarray",
"torch.no_grad",
"numpy.array"
] | [((7857, 7873), 'numpy.array', 'np.array', (['l_time'], {}), '(l_time)\n', (7865, 7873), True, 'import numpy as np\n'), ((7899, 7919), 'numpy.array', 'np.array', (['t_features'], {}), '(t_features)\n', (7907, 7919), True, 'import numpy as np\n'), ((3830, 3850), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (3840, 3850), True, 'import numpy as np\n'), ((3852, 3871), 'numpy.asarray', 'np.asarray', (['uv_maps'], {}), '(uv_maps)\n', (3862, 3871), True, 'import numpy as np\n'), ((3873, 3892), 'numpy.asarray', 'np.asarray', (['targets'], {}), '(targets)\n', (3883, 3892), True, 'import numpy as np\n'), ((4097, 4151), 'numpy.array', 'np.array', (['[dets[i].feature for i in detection_indices]'], {}), '([dets[i].feature for i in detection_indices])\n', (4105, 4151), True, 'import numpy as np\n'), ((4184, 4237), 'numpy.array', 'np.array', (['[dets[i].uv_map for i in detection_indices]'], {}), '([dets[i].uv_map for i in detection_indices])\n', (4192, 4237), True, 'import numpy as np\n'), ((4271, 4324), 'numpy.array', 'np.array', (['[tracks[i].track_id for i in track_indices]'], {}), '([tracks[i].track_id for i in track_indices])\n', (4279, 4324), True, 'import numpy as np\n'), ((7966, 7986), 'numpy.array', 'np.array', (['p_features'], {}), '(p_features)\n', (7974, 7986), True, 'import numpy as np\n'), ((8033, 8049), 'numpy.array', 'np.array', (['p_data'], {}), '(p_data)\n', (8041, 8049), True, 'import numpy as np\n'), ((8096, 8116), 'numpy.array', 'np.array', (['l_features'], {}), '(l_features)\n', (8104, 8116), True, 'import numpy as np\n'), ((8163, 8183), 'numpy.array', 'np.array', (['confidence'], {}), '(confidence)\n', (8171, 8183), True, 'import numpy as np\n'), ((8224, 8239), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8237, 8239), False, 'import torch\n'), ((2706, 2729), 'numpy.array', 'np.array', (['statistics[0]'], {}), '(statistics[0])\n', (2714, 2729), True, 'import numpy as np\n')] |
import numpy as np
# `Sequence` lives in `collections.abc`; importing it from `collections`
# was deprecated in Python 3.3 and removed in Python 3.10.
try:
    from collections.abc import Sequence
except ImportError:  # pragma: no cover - fallback for very old Pythons
    from collections import Sequence
from .__about__ import *
class RingBuffer(Sequence):
    """A fixed-capacity circular buffer backed by a numpy array.

    Indices grow without wrapping: ``_left_index`` and ``_right_index``
    address a virtual infinite tape, and physical positions are obtained
    modulo ``_capacity``.  ``_fix_indices`` keeps ``_left_index`` inside
    ``[0, _capacity)``; the logical length is the index difference.
    """
    def __init__(self, capacity, dtype=float, allow_overwrite=True):
        """
        Create a new ring buffer with the given capacity and element type
        Parameters
        ----------
        capacity: int
            The maximum capacity of the ring buffer
        dtype: data-type, optional
            Desired type of buffer elements. Use a type like (float, 2) to
            produce a buffer with shape (N, 2)
        allow_overwrite: bool
            If false, throw an IndexError when trying to append to an already
            full buffer
        """
        self._arr = np.empty(capacity, dtype)
        self._left_index = 0
        self._right_index = 0
        self._capacity = capacity
        self._allow_overwrite = allow_overwrite
    def _unwrap(self):
        """ Copy the data from this buffer into unwrapped form """
        # Two pieces: from the left index to the physical end, then the
        # wrapped-around prefix (empty when the data does not wrap).
        return np.concatenate((
            self._arr[self._left_index:min(self._right_index, self._capacity)],
            self._arr[:max(self._right_index - self._capacity, 0)]
        ))
    def _fix_indices(self):
        """
        Enforce our invariant that 0 <= self._left_index < self._capacity
        """
        # Shift both indices by a full period so the logical length
        # (right - left) is unchanged.
        if self._left_index >= self._capacity:
            self._left_index -= self._capacity
            self._right_index -= self._capacity
        elif self._left_index < 0:
            self._left_index += self._capacity
            self._right_index += self._capacity
    @property
    def is_full(self):
        """ True if there is no more space in the buffer """
        return len(self) == self._capacity
    # numpy compatibility
    def __array__(self):
        # Lets np.asarray(buffer) produce the unwrapped contents.
        return self._unwrap()
    @property
    def dtype(self):
        # Element dtype of the backing array.
        return self._arr.dtype
    @property
    def shape(self):
        # Logical shape: current length plus any per-element dimensions.
        return (len(self),) + self._arr.shape[1:]
    # these mirror methods from deque
    @property
    def maxlen(self):
        return self._capacity
    def append(self, value):
        """Append to the right end, overwriting the oldest element when
        full (unless overwriting is disabled, in which case IndexError)."""
        if self.is_full:
            if not self._allow_overwrite:
                raise IndexError('append to a full RingBuffer with overwrite disabled')
            elif not len(self):
                # Zero-capacity buffer: silently drop the value.
                return
            else:
                # Overwrite: advance the left edge past the oldest element.
                self._left_index += 1
        self._arr[self._right_index % self._capacity] = value
        self._right_index += 1
        self._fix_indices()
    def appendleft(self, value):
        """Prepend to the left end, overwriting the newest element when
        full (unless overwriting is disabled, in which case IndexError)."""
        if self.is_full:
            if not self._allow_overwrite:
                raise IndexError('append to a full RingBuffer with overwrite disabled')
            elif not len(self):
                # Zero-capacity buffer: silently drop the value.
                return
            else:
                # Overwrite: retract the right edge past the newest element.
                self._right_index -= 1
        self._left_index -= 1
        self._fix_indices()
        self._arr[self._left_index] = value
    def pop(self):
        """Remove and return the rightmost (newest) element."""
        if len(self) == 0:
            raise IndexError("pop from an empty RingBuffer")
        self._right_index -= 1
        self._fix_indices()
        res = self._arr[self._right_index % self._capacity]
        return res
    def popleft(self):
        """Remove and return the leftmost (oldest) element."""
        if len(self) == 0:
            raise IndexError("pop from an empty RingBuffer")
        res = self._arr[self._left_index]
        self._left_index += 1
        self._fix_indices()
        return res
    def extend(self, values):
        """Append a sequence of values on the right, overwriting the
        oldest data when the buffer would overflow."""
        lv = len(values)
        if len(self) + lv > self._capacity:
            if not self._allow_overwrite:
                raise IndexError('extend a RingBuffer such that it would overflow, with overwrite disabled')
            elif not len(self):
                # Zero-capacity buffer: nothing can be stored.
                return
        if lv >= self._capacity:
            # wipe the entire array! - this may not be threadsafe
            self._arr[...] = values[-self._capacity:]
            self._right_index = self._capacity
            self._left_index = 0
            return
        # Write in at most two physical pieces: up to the array end (sl1),
        # then the wrapped-around prefix (sl2, possibly empty).
        ri = self._right_index % self._capacity
        sl1 = np.s_[ri:min(ri + lv, self._capacity)]
        sl2 = np.s_[:max(ri + lv - self._capacity, 0)]
        self._arr[sl1] = values[:sl1.stop - sl1.start]
        self._arr[sl2] = values[sl1.stop - sl1.start:]
        self._right_index += lv
        # Advance the left edge over any elements that were overwritten.
        self._left_index = max(self._left_index, self._right_index - self._capacity)
        self._fix_indices()
    def extendleft(self, values):
        """Prepend a sequence of values on the left, overwriting the
        newest data when the buffer would overflow."""
        lv = len(values)
        if len(self) + lv > self._capacity:
            if not self._allow_overwrite:
                raise IndexError('extend a RingBuffer such that it would overflow, with overwrite disabled')
            elif not len(self):
                # Zero-capacity buffer: nothing can be stored.
                return
        if lv >= self._capacity:
            # wipe the entire array! - this may not be threadsafe
            self._arr[...] = values[:self._capacity]
            self._right_index = self._capacity
            self._left_index = 0
            return
        self._left_index -= lv
        self._fix_indices()
        li = self._left_index
        # Same two-piece write as extend(), anchored at the new left edge.
        sl1 = np.s_[li:min(li + lv, self._capacity)]
        sl2 = np.s_[:max(li + lv - self._capacity, 0)]
        self._arr[sl1] = values[:sl1.stop - sl1.start]
        self._arr[sl2] = values[sl1.stop - sl1.start:]
        # Retract the right edge over any elements that were overwritten.
        self._right_index = min(self._right_index, self._left_index + self._capacity)
    # implement Sequence methods
    def __len__(self):
        return self._right_index - self._left_index
    def __getitem__(self, item):
        if len(self) == 0:
            raise IndexError("Buffer is empty")
        # handle simple (b[1]) and basic (b[np.array([1, 2, 3])]) fancy indexing specially
        if not isinstance(item, tuple):
            item_arr = np.asarray(item)
            if issubclass(item_arr.dtype.type, np.integer):
                if self.is_full:
                    item_arr = (item_arr + self._left_index) % self._capacity
                else:
                    item_arr = ((item_arr + self._left_index)
                                % self._right_index)
                return self._arr[item_arr]
        # for everything else, get it right at the expense of efficiency
        return self._unwrap()[item]
    def __iter__(self):
        # alarmingly, this is comparable in speed to using itertools.chain
        return iter(self._unwrap())
    # Everything else
    def __repr__(self):
        return '<RingBuffer of {!r}>'.format(np.asarray(self))
| [
"numpy.empty",
"numpy.asarray"
] | [((594, 619), 'numpy.empty', 'np.empty', (['capacity', 'dtype'], {}), '(capacity, dtype)\n', (602, 619), True, 'import numpy as np\n'), ((4695, 4711), 'numpy.asarray', 'np.asarray', (['item'], {}), '(item)\n', (4705, 4711), True, 'import numpy as np\n'), ((5263, 5279), 'numpy.asarray', 'np.asarray', (['self'], {}), '(self)\n', (5273, 5279), True, 'import numpy as np\n')] |
"""
====================================================
Chebyshev Series (:mod:`numpy.polynomial.chebyshev`)
====================================================
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Classes
-------
.. autosummary::
:toctree: generated/
Chebyshev
Constants
---------
.. autosummary::
:toctree: generated/
chebdomain
chebzero
chebone
chebx
Arithmetic
----------
.. autosummary::
:toctree: generated/
chebadd
chebsub
chebmulx
chebmul
chebdiv
chebpow
chebval
chebval2d
chebval3d
chebgrid2d
chebgrid3d
Calculus
--------
.. autosummary::
:toctree: generated/
chebder
chebint
Misc Functions
--------------
.. autosummary::
:toctree: generated/
chebfromroots
chebroots
chebvander
chebvander2d
chebvander3d
chebgauss
chebweight
chebcompanion
chebfit
chebpts1
chebpts2
chebtrim
chebline
cheb2poly
poly2cheb
chebinterpolate
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] <NAME>, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
# Public API of this module; names not listed here are private helpers.
__all__ = [
    'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
    'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
    'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
    'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
    'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
    'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
    'chebgauss', 'chebweight', 'chebinterpolate']
# Remove small trailing coefficients; re-exported from polyutils.
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
    """Convert a Chebyshev series to the equivalent z-series.

    The result is a symmetric z-series of odd length ``2*c.size - 1``,
    ordered from low to high, with the same dtype as the input.  The
    result is never empty.  No argument checking is performed; this
    helper is for internal use only.

    Parameters
    ----------
    c : 1-D ndarray
        Chebyshev coefficients, ordered from low to high.

    Returns
    -------
    zs : 1-D ndarray
        Odd length symmetric z-series, ordered from low to high.
    """
    size = c.size
    half = c/2
    # Place the halved coefficients on the upper half of the z-axis,
    # then mirror them onto the lower half; the center entry (constant
    # term) is counted twice, restoring its full weight.
    zs = np.zeros(2*size - 1, dtype=c.dtype)
    zs[size - 1:] = half
    zs[:size] += half[::-1]
    return zs
def _zseries_to_cseries(zs):
    """Convert a symmetric z-series back to a Chebyshev series.

    Inverse of ``_cseries_to_zseries``: the upper half of the z-series
    (including the center) carries the coefficients, with every entry
    above the constant term doubled.  The result is never empty and has
    the input's dtype.  No argument checking is performed; this helper
    is for internal use only.

    Parameters
    ----------
    zs : 1-D ndarray
        Odd length symmetric z-series, ordered from low to high.

    Returns
    -------
    c : 1-D ndarray
        Chebyshev coefficients, ordered from low to high.
    """
    half = (zs.size + 1)//2
    coef = zs[half - 1:].copy()
    coef[1:half] *= 2
    return coef
def _zseries_mul(z1, z2):
    """Multiply two z-series.

    The product of two z-series is simply their convolution.  If
    symmetric/anti-symmetric z-series are denoted S/A, the symmetry of
    the product follows S*S -> S, A*A -> S, and S*A, A*S -> A.

    Parameters
    ----------
    z1, z2 : 1-D ndarray
        The arrays must be 1-D but this is not checked.

    Returns
    -------
    product : 1-D ndarray
        The product z-series.
    """
    # np.convolve performs full polynomial-style convolution, which is
    # exactly z-series multiplication.
    return np.convolve(z1, z2)
def _zseries_div(z1, z2):
    """Divide the first z-series by the second.
    Divide `z1` by `z2` and return the quotient and remainder as z-series.
    Warning: this implementation only applies when both z1 and z2 have the
    same symmetry, which is sufficient for present purposes.
    Parameters
    ----------
    z1, z2 : 1-D ndarray
        The arrays must be 1-D and have the same symmetry, but this is not
        checked.
    Returns
    -------
    (quotient, remainder) : 1-D ndarrays
        Quotient and remainder as z-series.
    Notes
    -----
    This is not the same as polynomial division on account of the desired form
    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
    then the following rules apply:
    S/S -> S,S
    A/A -> S,A
    The restriction to types of the same symmetry could be fixed but seems like
    unneeded generality. There is no natural form for the remainder in the case
    where there is no symmetry.
    """
    # Work on copies: the elimination below mutates both operands.
    z1 = z1.copy()
    z2 = z2.copy()
    lc1 = len(z1)
    lc2 = len(z2)
    if lc2 == 1:
        # Length-1 divisor: plain elementwise scaling, zero remainder.
        z1 /= z2
        return z1, z1[:1]*0
    elif lc1 < lc2:
        # Dividend shorter than divisor: zero quotient, z1 is the remainder.
        return z1[:1]*0, z1
    else:
        dlen = lc1 - lc2
        # Normalize the divisor by its leading term; compensated at the end.
        scl = z2[0]
        z2 /= scl
        quo = np.empty(dlen + 1, dtype=z1.dtype)
        i = 0
        j = dlen
        # Eliminate from both ends at once: because the inputs share a
        # symmetry, the quotient is symmetric, so each pass fixes
        # quo[i] and quo[dlen - i] together and subtracts the matching
        # multiples of z2 from both ends of z1.
        while i < j:
            r = z1[i]
            quo[i] = z1[i]
            quo[dlen - i] = r
            tmp = r*z2
            z1[i:i+lc2] -= tmp
            z1[j:j+lc2] -= tmp
            i += 1
            j -= 1
        # Middle coefficient (i == j) is eliminated only once.
        r = z1[i]
        quo[i] = r
        tmp = r*z2
        z1[i:i+lc2] -= tmp
        # Undo the divisor normalization.
        quo /= scl
        # The symmetric central span of what is left is the remainder.
        rem = z1[i+1:i-1+lc2].copy()
        return quo, rem
def _zseries_der(zs):
    """Differentiate a z-series with respect to x (not z).

    Uses the chain rule together with dx/dz from the module notes.
    Both the z-exponent weights and the divisor series are doubled so
    that only integer scalars appear, keeping the routine compatible
    with Decimal and similar exact scalar types; the two factors of two
    cancel in the division.  Note that ``zs`` is modified in place.

    Parameters
    ----------
    zs : z-series
        The z-series to differentiate.

    Returns
    -------
    derivative : z-series
        The derivative
    """
    half = len(zs)//2
    # d/dz multiplies each term by its (signed) z-exponent.
    zs *= np.arange(-half, half + 1)*2
    # Divide by 2*z*dx/dz, represented by the series [-1, 0, 1].
    divisor = np.array([-1, 0, 1], dtype=zs.dtype)
    quotient, _ = _zseries_div(zs, divisor)
    return quotient
def _zseries_int(zs):
    """Integrate a z-series with respect to x (not z).

    Achieved by a change of variable using dx/dz from the module notes.
    The multiplier series is doubled so only integer scalars appear
    (keeping Decimal and similar types working), and the result is
    compensated by dividing through by the doubled exponents.

    Parameters
    ----------
    zs : z-series
        The z-series to integrate

    Returns
    -------
    integral : z-series
        The indefinite integral
    """
    half = 1 + len(zs)//2
    # Multiply by 2*z*dx/dz, represented by the series [-1, 0, 1].
    factor = np.array([-1, 0, 1], dtype=zs.dtype)
    result = _zseries_mul(zs, factor)
    # Divide each term by twice its z-exponent; the constant term has
    # exponent zero and is set to zero (integration constant dropped).
    scale = np.arange(-half, half + 1)*2
    result[:half] /= scale[:half]
    result[half + 1:] /= scale[half + 1:]
    result[half] = 0
    return result
#
# Chebyshev series functions
#
def poly2cheb(pol):
    """
    Convert a polynomial to a Chebyshev series.
    Convert an array of polynomial coefficients (relative to the
    "standard" basis), ordered from lowest degree to highest, into the
    coefficients of the equivalent Chebyshev series, also ordered from
    lowest to highest degree.
    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients
    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Chebyshev
        series.
    See Also
    --------
    cheb2poly
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    """
    [pol] = pu.as_series([pol])
    # Horner's scheme carried out in the Chebyshev basis: repeatedly
    # multiply the running result by x and fold in the coefficient of
    # the next lower power.
    result = 0
    for coef in pol[::-1]:
        result = chebadd(chebmulx(result), coef)
    return result
def cheb2poly(c):
    """
    Convert a Chebyshev series to a polynomial.
    Convert an array of Chebyshev series coefficients, ordered from
    lowest degree to highest, into the coefficients of the equivalent
    polynomial (relative to the "standard" basis), also ordered from
    lowest to highest degree.
    Parameters
    ----------
    c : array_like
        1-D array containing the Chebyshev series coefficients, ordered
        from lowest order term to highest.
    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.
    See Also
    --------
    poly2cheb
    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    """
    from .polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    if n < 3:
        # T_0 = 1 and T_1 = x, so short series are already polynomial.
        return c
    # Backward recurrence based on T_{k+1}(x) = 2*x*T_k(x) - T_{k-1}(x);
    # c1 tracks the polynomial paired with the current highest degree.
    c0, c1 = c[-2], c[-1]
    for deg in range(n - 1, 1, -1):
        c0, c1 = polysub(c[deg - 2], c1), polyadd(c0, polymulx(c1)*2)
    return polyadd(c0, polymulx(c1))
#
# These are constant arrays of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
    """
    Chebyshev series whose graph is a straight line.
    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.
    Returns
    -------
    y : ndarray
        This module's representation of the Chebyshev series for
        ``off + scl*x``.
    See Also
    --------
    polyline
    """
    # T_0 = 1 and T_1 = x, so the series is just [off, scl]; a zero
    # slope collapses to the degree-0 series [off].
    coeffs = [off, scl] if scl != 0 else [off]
    return np.array(coeffs)
def chebfromroots(roots):
    """
    Generate a Chebyshev series with given roots.
    Return, in Chebyshev form, the coefficients of the polynomial
    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
    where the ``r_n`` are the roots specified in `roots`.  A root of
    multiplicity m must appear m times, in any order.  Note that the
    coefficient of the last Chebyshev term is generally not 1, even for
    monic polynomials.
    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.
    Returns
    -------
    out : ndarray
        1-D array of coefficients.  Real when all roots are real; complex
        when any root is complex, even if every coefficient of the result
        happens to be real.
    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, hermfromroots, hermefromroots
    """
    # Delegate to the shared root-expansion helper, parameterized by this
    # basis's line constructor and multiplication.
    return pu._fromroots(chebline, chebmul, roots)
def chebadd(c1, c2):
    """
    Add one Chebyshev series to another.
    Returns the sum of two Chebyshev series `c1` + `c2`.  Both arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.
    Returns
    -------
    out : ndarray
        Array representing the Chebyshev series of their sum.
    See Also
    --------
    chebsub, chebmulx, chebmul, chebdiv, chebpow
    Notes
    -----
    The Chebyshev basis is closed under addition, so unlike
    multiplication or division no re-projection onto the basis is
    needed; the sum is simply component-wise.
    """
    # Component-wise addition via the shared helper.
    return pu._add(c1, c2)
def chebsub(c1, c2):
    """
    Subtract one Chebyshev series from another.
    Returns the difference of two Chebyshev series `c1` - `c2`.  Both
    arguments are sequences of coefficients ordered from lowest order
    term to highest, i.e., [1,2,3] represents ``T_0 + 2*T_1 + 3*T_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.
    Returns
    -------
    out : ndarray
        Of Chebyshev series coefficients representing their difference.
    See Also
    --------
    chebadd, chebmulx, chebmul, chebdiv, chebpow
    Notes
    -----
    The Chebyshev basis is closed under subtraction, so unlike
    multiplication or division no re-projection onto the basis is
    needed; the difference is simply component-wise.
    """
    # Component-wise subtraction via the shared helper.
    return pu._sub(c1, c2)
def chebmulx(c):
    """Multiply a Chebyshev series by x.

    Multiply the series `c` by x, where x is the independent variable.

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    .. versionadded:: 1.5.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series maps to itself.
    if len(c) == 1 and c[0] == 0:
        return c
    out = np.empty(len(c) + 1, dtype=c.dtype)
    # x*T_0 = T_1, so the constant coefficient moves up one slot.
    out[0] = c[0]*0
    out[1] = c[0]
    if len(c) > 1:
        # x*T_n = (T_{n+1} + T_{n-1})/2 for n >= 1: each higher
        # coefficient is halved and scattered to its two neighbors.
        halved = c[1:]/2
        out[2:] = halved
        out[0:-2] += halved
    return out
def chebmul(c1, c2):
    """
    Multiply one Chebyshev series by another.
    Returns the product of two Chebyshev series `c1` * `c2`.  Both
    arguments are sequences of coefficients, from lowest order "term" to
    highest, e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.
    Returns
    -------
    out : ndarray
        Of Chebyshev series coefficients representing their product.
    See Also
    --------
    chebadd, chebsub, chebmulx, chebdiv, chebpow
    Notes
    -----
    The polynomial product of two C-series generally contains terms
    outside the Chebyshev basis, so the product must be re-projected
    onto the basis; the resulting coefficients can look "unintuitive"
    although they are correct.
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # In the z-series representation multiplication is a plain
    # convolution; convert, multiply, and map back.
    product = _zseries_mul(_cseries_to_zseries(c1), _cseries_to_zseries(c2))
    return pu.trimseq(_zseries_to_cseries(product))
def chebdiv(c1, c2):
    """
    Divide one Chebyshev series by another.
    Returns the quotient-with-remainder of two Chebyshev series
    `c1` / `c2`.  Both arguments are sequences of coefficients from
    lowest order "term" to highest, e.g., [1,2,3] represents the series
    ``T_0 + 2*T_1 + 3*T_2``.
    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.
    Returns
    -------
    [quo, rem] : ndarrays
        Of Chebyshev series coefficients representing the quotient and
        remainder.
    Raises
    ------
    ZeroDivisionError
        If the leading coefficient of `c2` is zero.
    See Also
    --------
    chebadd, chebsub, chemulx, chebmul, chebpow
    Notes
    -----
    Quotient and remainder of a C-series division generally contain
    terms outside the Chebyshev basis and must be re-projected onto it,
    which can make the results look "unintuitive" although they are
    correct.
    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()
    # Dividing through z-series is more efficient than the generic
    # `pu._div(chebmul, c1, c2)` route.
    n1, n2 = len(c1), len(c2)
    if n1 < n2:
        # Divisor of higher degree: zero quotient, c1 is the remainder.
        return c1[:1]*0, c1
    if n2 == 1:
        # Scalar divisor: elementwise scaling, zero remainder.
        return c1/c2[-1], c1[:1]*0
    quo, rem = _zseries_div(_cseries_to_zseries(c1), _cseries_to_zseries(c2))
    return (pu.trimseq(_zseries_to_cseries(quo)),
            pu.trimseq(_zseries_to_cseries(rem)))
def chebpow(c, pow, maxpower=16):
    """Raise a Chebyshev series to a power.

    Return the Chebyshev series `c` raised to the power `pow`, where
    `c` is a sequence of coefficients ordered from low to high degree,
    i.e., ``[1,2,3]`` is the series ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised.
    maxpower : integer, optional
        Maximum power allowed, mainly to keep the result from growing
        to unmanageable size. Default is 16.

    Returns
    -------
    coef : ndarray
        Chebyshev series of power.

    See Also
    --------
    chebadd, chebsub, chebmulx, chebmul, chebdiv

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebpow([1, 2, 3, 4], 2)
    array([15.5, 22. , 16. , ..., 12.5, 12. ,  8. ])
    """
    # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it
    # avoids converting between z and c series repeatedly
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # repeated z-series convolution; convert back to c-series only once
    zs = _cseries_to_zseries(c)
    prd = zs
    for _ in range(power - 1):
        prd = np.convolve(prd, zs)
    return _zseries_to_cseries(prd)
def chebder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Chebyshev series.
    Returns the Chebyshev series coefficients `c` differentiated `m` times
    along `axis`. At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable). The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
    while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
    2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.
    Parameters
    ----------
    c : array_like
        Array of Chebyshev series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`. The end result is
        multiplication by ``scl**m``. This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).
        .. versionadded:: 1.7.0
    Returns
    -------
    der : ndarray
        Chebyshev series of the derivative.
    See Also
    --------
    chebint
    Notes
    -----
    In general, the result of differentiating a C-series needs to be
    "reprojected" onto the C-series basis set. Thus, typically, the
    result of this function is "unintuitive," albeit correct; see Examples
    section below.
    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c = (1,2,3,4)
    >>> C.chebder(c)
    array([14., 12., 24.])
    >>> C.chebder(c,3)
    array([96.])
    >>> C.chebder(c,scl=-1)
    array([-14., -12., -24.])
    >>> C.chebder(c,2,-1)
    array([12., 96.])
    """
    c = np.array(c, ndmin=1, copy=True)
    # boolean/integer coefficient arrays are promoted to double so the
    # in-place arithmetic below does not truncate
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt = pu._deprecate_as_int(m, "the order of derivation")
    iaxis = pu._deprecate_as_int(axis, "the axis")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        # zeroth derivative: return the (copied) input unchanged
        return c
    # move the working axis to the front so indexing below is on axis 0
    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        # differentiated past the series degree: result is identically zero
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # downward recurrence for Chebyshev derivatives; note that it
            # mutates c[j - 2] while iterating, so the loop order matters
            for j in range(n, 2, -1):
                der[j - 1] = (2*j)*c[j]
                c[j - 2] += (j*c[j])/(j - 2)
            if n > 1:
                der[1] = 4*c[2]
            der[0] = c[1]
            c = der
    # restore the original axis order
    c = np.moveaxis(c, 0, iaxis)
    return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Chebyshev series.
    Returns the Chebyshev series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable. ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.) The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
    represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
    2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
    Parameters
    ----------
    c : array_like
        Array of Chebyshev series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s). The value of the first integral at zero
        is the first value in the list, the value of the second integral
        at zero is the second value, etc. If ``k == []`` (the default),
        all constants are set to zero. If ``m == 1``, a single scalar can
        be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).
        .. versionadded:: 1.7.0
    Returns
    -------
    S : ndarray
        C-series coefficients of the integral.
    Raises
    ------
    ValueError
        If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
        ``np.ndim(scl) != 0``.
    See Also
    --------
    chebder
    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note? Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a`- perhaps not what one would have first thought.
    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set. Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.
    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c = (1,2,3)
    >>> C.chebint(c)
    array([ 0.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,3)
    array([ 0.03125   , -0.1875    ,  0.04166667, -0.05208333,  0.01041667, # may vary
        0.00625   ])
    >>> C.chebint(c, k=3)
    array([ 3.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,lbnd=-2)
    array([ 8.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,scl=-2)
    array([-1.,  1., -1., -1.])
    """
    c = np.array(c, ndmin=1, copy=True)
    # promote boolean/integer coefficients so in-place arithmetic keeps
    # fractional parts
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    # a scalar integration constant is accepted when m == 1
    if not np.iterable(k):
        k = [k]
    cnt = pu._deprecate_as_int(m, "the order of integration")
    iaxis = pu._deprecate_as_int(axis, "the axis")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if np.ndim(lbnd) != 0:
        raise ValueError("lbnd must be a scalar.")
    if np.ndim(scl) != 0:
        raise ValueError("scl must be a scalar.")
    iaxis = normalize_axis_index(iaxis, c.ndim)
    if cnt == 0:
        # zeroth integral: return the (copied) input unchanged
        return c
    # move the working axis to the front so indexing below is on axis 0
    c = np.moveaxis(c, iaxis, 0)
    # pad missing integration constants with zeros
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # integrating the zero series just adds the constant
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0]*0
            tmp[1] = c[0]
            if n > 1:
                tmp[2] = c[1]/4
            # antiderivative recurrence; tmp[j - 1] is adjusted in place,
            # so the loop order matters
            for j in range(2, n):
                tmp[j + 1] = c[j]/(2*(j + 1))
                tmp[j - 1] -= c[j]/(2*(j - 1))
            # fix the constant term so the integral equals k[i] at lbnd
            tmp[0] += k[i] - chebval(lbnd, tmp)
            c = tmp
    # restore the original axis order
    c = np.moveaxis(c, 0, iaxis)
    return c
def chebval(x, c, tensor=True):
    """
    Evaluate a Chebyshev series at points x.

    If `c` has length ``n + 1``, this function returns

    .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)

    `x` is converted to an array only when it is a tuple or a list;
    otherwise it is treated as a scalar. In either case `x` (or its
    elements) must support multiplication and addition with itself and
    with the elements of `c`.

    For 1-D `c` the result has the shape of `x`. For multidimensional
    `c` the shape of the result is ``c.shape[1:] + x.shape`` when
    `tensor` is true, and ``c.shape[1:]`` otherwise. Trailing zero
    coefficients are still used in the evaluation, so trim them if
    efficiency matters.

    Parameters
    ----------
    x : array_like, compatible object
        Points at which to evaluate the series. Lists and tuples are
        converted to ndarrays; anything else is used as-is.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms
        of degree n are contained in ``c[n]``. If `c` is multidimensional
        the remaining indices enumerate multiple polynomials; in the 2-D
        case the coefficients may be thought of as stored in the columns
        of `c`.
    tensor : boolean, optional
        If True, the coefficient array shape is extended with ones on
        the right, one for each dimension of `x`, so every column of
        coefficients is evaluated for every element of `x`. If False,
        `x` is broadcast over the columns of `c`. Default is True.
        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    chebval2d, chebgrid2d, chebval3d, chebgrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.
    """
    c = np.array(c, ndmin=1, copy=True)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # promote boolean/integer coefficients to floating point
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # append one axis per dimension of x so c broadcasts over x
        c = c.reshape(c.shape + (1,)*x.ndim)
    nc = len(c)
    if nc == 1:
        b0, b1 = c[0], 0
    elif nc == 2:
        b0, b1 = c[0], c[1]
    else:
        # Clenshaw recurrence, run from the highest coefficient downward
        x2 = 2*x
        b0 = c[-2]
        b1 = c[-1]
        for k in range(3, nc + 1):
            b0, b1 = c[-k] - b1, b0 + b1*x2
    return b0 + b1*x
def chebval2d(x, y, c):
    """
    Evaluate a 2-D Chebyshev series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)

    `x` and `y` are converted to arrays only when they are tuples or
    lists; otherwise they are treated as scalars, and they must have the
    same shape after conversion. In either case they (or their elements)
    must support multiplication and addition with themselves and with
    the elements of `c`.

    A 1-D `c` implicitly has a one appended to its shape to make it 2-D.
    The shape of the result is ``c.shape[2:] + x.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points ``(x, y)``,
        where `x` and `y` must have the same shape.
    c : array_like
        Array of coefficients ordered so that the coefficient of the
        term of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than 2 the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        Values of the 2-D Chebyshev series at points formed from pairs
        of corresponding values from `x` and `y`.

    See Also
    --------
    chebval, chebgrid2d, chebval3d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # delegate to the shared n-dimensional point evaluator
    return pu._valnd(chebval, c, x, y)
def chebgrid2d(x, y, c):
    """
    Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),

    where the points ``(a, b)`` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`, so the result is a grid with `x` in
    the first dimension and `y` in the second.

    `x` and `y` are converted to arrays only when they are tuples or
    lists; otherwise they are treated as scalars. In either case they
    (or their elements) must support multiplication and addition with
    themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended
    to its shape to make it 2-D. The shape of the result is
    ``c.shape[2:] + x.shape + y.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`.
    c : array_like
        Array of coefficients ordered so that the coefficient of the
        term of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        Values of the 2-D Chebyshev series at points in the Cartesian
        product of `x` and `y`.

    See Also
    --------
    chebval, chebval2d, chebval3d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # delegate to the shared n-dimensional grid evaluator
    return pu._gridnd(chebval, c, x, y)
def chebval3d(x, y, z, c):
    """
    Evaluate a 3-D Chebyshev series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)

    `x`, `y`, and `z` are converted to arrays only when they are tuples
    or lists; otherwise they are treated as scalars, and they must have
    the same shape after conversion. In either case they (or their
    elements) must support multiplication and addition with themselves
    and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result is
    ``c.shape[3:] + x.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape.
    c : array_like
        Array of coefficients ordered so that the coefficient of the
        term of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c`
        has dimension greater than 3 the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        Values of the multidimensional polynomial on points formed from
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    chebval, chebval2d, chebgrid2d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # delegate to the shared n-dimensional point evaluator
    return pu._valnd(chebval, c, x, y, z)
def chebgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)

    where the points ``(a, b, c)`` consist of all triples formed by
    taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting
    points form a grid with `x` in the first dimension, `y` in the
    second, and `z` in the third.

    `x`, `y`, and `z` are converted to arrays only when they are tuples
    or lists; otherwise they are treated as scalars. In either case they
    (or their elements) must support multiplication and addition with
    themselves and with the elements of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended
    to its shape to make it 3-D. The shape of the result is
    ``c.shape[3:] + x.shape + y.shape + z.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms
        of degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets
        of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        Values of the series at points in the Cartesian product of `x`,
        `y`, and `z`.

    See Also
    --------
    chebval, chebval2d, chebgrid2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # delegate to the shared n-dimensional grid evaluator
    return pu._gridnd(chebval, c, x, y, z)
def chebvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Return the pseudo-Vandermonde matrix of degree `deg` and sample
    points `x`, defined by

    .. math:: V[..., i] = T_i(x),

    where ``0 <= i <= deg``. The leading indices of `V` index the
    elements of `x` and the last index is the degree of the Chebyshev
    polynomial.

    If `c` is a 1-D array of coefficients of length ``n + 1`` and
    ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
    ``chebval(x, c)`` agree up to roundoff. This equivalence is useful
    for least squares fitting and for evaluating many Chebyshev series
    of the same degree at the same sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. A scalar
        is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo Vandermonde matrix of shape ``x.shape + (deg + 1,)``,
        where the last index is the degree of the corresponding
        Chebyshev polynomial. The dtype is that of the converted `x`.
    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg < 0:
        raise ValueError("deg must be non-negative")
    # adding 0.0 forces a floating (or complex) dtype
    x = np.array(x, copy=False, ndmin=1) + 0.0
    v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    # forward three-term recurrence: T_0 = 1, T_1 = x,
    # T_i = 2*x*T_{i-1} - T_{i-2}
    v[0] = x*0 + 1
    if ideg > 0:
        x2 = 2*x
        v[1] = x
        for i in range(2, ideg + 1):
            v[i] = v[i-1]*x2 - v[i-2]
    # degree runs along the last axis of the result
    return np.moveaxis(v, 0, -1)
def chebvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Return the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y)``, defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading
    indices of `V` index the points ``(x, y)`` and the last index
    encodes the degrees of the Chebyshev polynomials.

    If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` agree up to
    roundoff. This equivalence is useful for least squares fitting and
    for evaluating many 2-D Chebyshev series of the same degrees at the
    same sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        are converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted
        to 1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        Matrix of shape ``x.shape + (order,)`` with
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype is that of the
        converted `x` and `y`.

    See Also
    --------
    chebvander, chebvander3d, chebval2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # delegate to the shared flattened n-dimensional Vandermonde builder
    return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg)
def chebvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Return the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y, z)``. If `l, m, n` are the given degrees in
    `x, y, z`, the matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The
    leading indices of `V` index the points ``(x, y, z)`` and the last
    index encodes the degrees of the Chebyshev polynomials.

    If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the
    columns of `V` correspond to the elements of a 3-D coefficient
    array `c` of shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` agree up to
    roundoff. This equivalence is useful for least squares fitting and
    for evaluating many 3-D Chebyshev series of the same degrees at the
    same sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        are converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted
        to 1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        Matrix of shape ``x.shape + (order,)`` with
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype is
        that of the converted `x`, `y`, and `z`.

    See Also
    --------
    chebvander, chebvander2d, chebval2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # delegate to the shared flattened n-dimensional Vandermonde builder
    return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Chebyshev series to data.

    Return the coefficients of a Chebyshev series of degree `deg` that
    is the least squares fit to the data values `y` given at points `x`.
    If `y` is 1-D the returned coefficients are also 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the
    resulting coefficients are stored in the corresponding columns of a
    2-D return. The fitted polynomial(s) are in the form

    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets sharing
        the same x-coordinates can be fitted at once by passing a
        2D-array containing one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single
        integer all terms up to and including the `deg`'th term are
        included in the fit. For NumPy versions >= 1.11.0 a list of
        integers specifying the degrees of the terms to include may be
        used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller
        than this relative to the largest singular value are ignored.
        The default is ``len(x)*eps``, where eps is the relative
        precision of the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When False (the
        default) just the coefficients are returned; when True,
        diagnostic information from the singular value decomposition is
        also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
        weights are chosen so that the errors of the products
        ``w[i]*y[i]`` all have the same variance. The default is None.
        .. versionadded:: 1.5.0

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Chebyshev coefficients ordered from low to high. If `y` was
        2-D, the coefficients for the data in column k of `y` are in
        column `k`.
    [residuals, rank, singular_values, rcond] : list
        These values are only returned if ``full == True``:
        resid -- sum of squared residuals of the least squares fit;
        rank -- the numerical rank of the scaled Vandermonde matrix;
        sv -- singular values of the scaled Vandermonde matrix;
        rcond -- value of `rcond`.
        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if ``full == False``. The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyfit, legfit, lagfit, hermfit, hermefit
    chebval : Evaluates a Chebyshev series.
    chebvander : Vandermonde matrix of Chebyshev series.
    chebweight : Chebyshev weight function.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the Chebyshev series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. The (typically)
    overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    with `V` the weighted pseudo Vandermonde matrix of `x`, is solved
    using the singular value decomposition of `V`. If some singular
    values of `V` are so small that they are neglected, a `RankWarning`
    is issued: the coefficients may then be poorly determined, and a
    lower-order fit usually removes the warning. `rcond` can also be
    set below its default, but the resulting fit may be spurious and
    dominated by roundoff error.

    Fits using Chebyshev series are usually better conditioned than
    fits using power series, but much depends on the distribution of
    the sample points and the smoothness of the data. If the quality of
    the fit is inadequate, splines may be a good alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           https://en.wikipedia.org/wiki/Curve_fitting
    """
    # delegate to the shared least-squares driver with the Chebyshev basis
    return pu._fit(chebvander, x, y, deg, rcond, full, w)
def chebcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Chebyshev basis polynomial. This provides
    better eigenvalue estimates than the unscaled case, and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # degree-1 series: the single root is -c0/c1
        return np.array([[-c[0]/c[1]]])
    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # scaling that symmetrizes the companion matrix for basis polynomials
    scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
    # flat views onto the super- and sub-diagonals
    upper = mat.reshape(-1)[1::n+1]
    lower = mat.reshape(-1)[n::n+1]
    upper[0] = np.sqrt(.5)
    upper[1:] = 1/2
    lower[...] = upper
    # fold the monic correction from the series itself into the last column
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
    return mat
def chebroots(c):
    """
    Compute the roots of a Chebyshev series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * T_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, hermroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have
    large errors due to the numerical instability of the series for
    such values, and roots with multiplicity greater than 1 also show
    larger errors because the series value near such points is
    relatively insensitive to errors in the roots. Isolated roots near
    the origin can be improved by a few iterations of Newton's method.
    The Chebyshev basis polynomials aren't powers of `x`, so the
    results of this function may seem unintuitive.

    Examples
    --------
    >>> import numpy.polynomial.chebyshev as cheb
    >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
    array([ -5.00000000e-01,   2.60860684e-17,   1.00000000e+00]) # may vary
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) <= 1:
        # constant series has no roots
        return np.array([], dtype=c.dtype)
    if len(c) == 2:
        # linear series: closed-form single root
        return np.array([-c[0]/c[1]])
    # rotated companion matrix reduces error
    m = chebcompanion(c)[::-1, ::-1]
    r = la.eigvals(m)
    r.sort()
    return r
def chebinterpolate(func, deg, args=()):
    """Interpolate a function at the Chebyshev points of the first kind.

    Return the Chebyshev series that interpolates `func` at the
    Chebyshev points of the first kind in the interval [-1, 1]. With
    increasing `deg` the interpolating series tends to a minmax
    approximation of `func` if the function is continuous in the
    interval.

    .. versionadded:: 1.14.0

    Parameters
    ----------
    func : function
        The function to be approximated. It must be a function of a
        single variable of the form ``f(x, a, b, c...)``, where
        ``a, b, c...`` are extra arguments passed in the `args`
        parameter.
    deg : int
        Degree of the interpolating polynomial.
    args : tuple, optional
        Extra arguments to be used in the function call. Default is no
        extra arguments.

    Returns
    -------
    coef : ndarray, shape (deg + 1,)
        Chebyshev coefficients of the interpolating series ordered from
        low to high.

    Notes
    -----
    The Chebyshev polynomials used in the interpolation are orthogonal
    when sampled at the Chebyshev points of the first kind. If it is
    desired to constrain some of the coefficients they can simply be
    set to the desired value after the interpolation; no new
    interpolation or fit is needed. This is especially useful if it is
    known apriori that some coefficients are zero — for instance, for
    an even function the coefficients of the odd-degree terms in the
    result can be set to zero.
    """
    deg = np.asarray(deg)
    # deg must be a non-negative scalar integer
    if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int")
    if deg < 0:
        raise ValueError("expected deg >= 0")
    order = deg + 1
    nodes = chebpts1(order)
    samples = func(nodes, *args)
    vmat = chebvander(nodes, deg)
    # discrete orthogonality of T_k at first-kind points yields the
    # coefficients directly; T_0 has a different normalization
    c = np.dot(vmat.T, samples)
    c[0] /= order
    c[1:] /= 0.5*order
    return c
def chebgauss(deg):
    """
    Gauss-Chebyshev quadrature.

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature. They correctly integrate polynomials of degree
    :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----
    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100; higher degrees
    may be problematic. For Gauss-Chebyshev there are closed form
    solutions for the sample points and weights: if ``n = deg``, then

    .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))

    .. math:: w_i = \\pi / n
    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg <= 0:
        raise ValueError("deg must be a positive integer")
    # closed-form nodes: cos(pi*(2i - 1)/(2n)) for i = 1..n
    x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
    # every weight is identical, pi/n
    w = np.full(ideg, np.pi/ideg)
    return x, w
def chebweight(x):
    """
    The weight function of the Chebyshev polynomials.
    The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
    integration is :math:`[-1, 1]`. The Chebyshev polynomials are
    orthogonal, but not normalized, with respect to this weight function.
    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.
    Returns
    -------
    w : ndarray
        The weight function at `x`.
    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # Factor sqrt(1 - x^2) as sqrt(1 + x)*sqrt(1 - x), which matches the
    # reference formulation and behaves well near the endpoints.
    left = np.sqrt(1. + x)
    right = np.sqrt(1. - x)
    return 1./(left * right)
def chebpts1(npts):
    """
    Chebyshev points of the first kind.
    The Chebyshev points of the first kind are the points ``cos(x)``,
    where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
    Parameters
    ----------
    npts : int
        Number of sample points desired.
    Returns
    -------
    pts : ndarray
        The Chebyshev points of the first kind.
    See Also
    --------
    chebpts2
    Notes
    -----
    .. versionadded:: 1.5.0
    """
    # Validate that npts is an integral value >= 1.
    n = int(npts)
    if n != npts:
        raise ValueError("npts must be integer")
    if n < 1:
        raise ValueError("npts must be >= 1")
    # Midpoint-offset grid on [-pi, 0), then map through cosine.
    angles = np.linspace(-np.pi, 0, n, endpoint=False) + np.pi/(2*n)
    return np.cos(angles)
def chebpts2(npts):
    """
    Chebyshev points of the second kind.
    The Chebyshev points of the second kind are the points ``cos(x)``,
    where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
    Parameters
    ----------
    npts : int
        Number of sample points desired.
    Returns
    -------
    pts : ndarray
        The Chebyshev points of the second kind.
    Notes
    -----
    .. versionadded:: 1.5.0
    """
    # Validate that npts is an integral value >= 2.
    n = int(npts)
    if n != npts:
        raise ValueError("npts must be integer")
    if n < 2:
        raise ValueError("npts must be >= 2")
    # Uniform angles on [-pi, 0] (endpoints included) mapped through cosine.
    return np.cos(np.linspace(-np.pi, 0, n))
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
    """A Chebyshev series class.
    The Chebyshev class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    methods listed below.
    Parameters
    ----------
    coef : array_like
        Chebyshev coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].
        .. versionadded:: 1.6.0
    """
    # Virtual Functions
    # Hook the ABCPolyBase arithmetic/evaluation machinery up to the
    # module-level cheb* implementations.
    _add = staticmethod(chebadd)
    _sub = staticmethod(chebsub)
    _mul = staticmethod(chebmul)
    _div = staticmethod(chebdiv)
    _pow = staticmethod(chebpow)
    _val = staticmethod(chebval)
    _int = staticmethod(chebint)
    _der = staticmethod(chebder)
    _fit = staticmethod(chebfit)
    _line = staticmethod(chebline)
    _roots = staticmethod(chebroots)
    _fromroots = staticmethod(chebfromroots)
    @classmethod
    def interpolate(cls, func, deg, domain=None, args=()):
        """Interpolate a function at the Chebyshev points of the first kind.
        Returns the series that interpolates `func` at the Chebyshev points of
        the first kind scaled and shifted to the `domain`. The resulting series
        tends to a minmax approximation of `func` when the function is
        continuous in the domain.
        .. versionadded:: 1.14.0
        Parameters
        ----------
        func : function
            The function to be interpolated. It must be a function of a single
            variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
            extra arguments passed in the `args` parameter.
        deg : int
            Degree of the interpolating polynomial.
        domain : {None, [beg, end]}, optional
            Domain over which `func` is interpolated. The default is None, in
            which case the domain is [-1, 1].
        args : tuple, optional
            Extra arguments to be used in the function call. Default is no
            extra arguments.
        Returns
        -------
        polynomial : Chebyshev instance
            Interpolating Chebyshev instance.
        Notes
        -----
        See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
        """
        if domain is None:
            domain = cls.domain
        # Wrap func so interpolation happens in the window coordinates, then
        # delegate to the module-level chebinterpolate on [-1, 1].
        xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
        coef = chebinterpolate(xfunc, deg)
        return cls(coef, domain=domain)
    # Virtual properties
    domain = np.array(chebdomain)
    window = np.array(chebdomain)
    basis_name = 'T'
| [
"numpy.linalg.eigvals",
"numpy.moveaxis",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.core.multiarray.normalize_axis_index",
"numpy.ndim",
"numpy.ones",
"numpy.iterable",
"numpy.array",
"numpy.arange",
"numpy.cos",
"numpy.linspace",
"numpy.dot",
"numpy.convolve",
"numpy.all",... | [((11189, 11206), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (11197, 11206), True, 'import numpy as np\n'), ((11263, 11276), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (11271, 11276), True, 'import numpy as np\n'), ((11331, 11344), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (11339, 11344), True, 'import numpy as np\n'), ((11408, 11424), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (11416, 11424), True, 'import numpy as np\n'), ((3382, 3416), 'numpy.zeros', 'np.zeros', (['(2 * n - 1)'], {'dtype': 'c.dtype'}), '(2 * n - 1, dtype=c.dtype)\n', (3390, 3416), True, 'import numpy as np\n'), ((4583, 4602), 'numpy.convolve', 'np.convolve', (['z1', 'z2'], {}), '(z1, z2)\n', (4594, 4602), True, 'import numpy as np\n'), ((7035, 7071), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {'dtype': 'zs.dtype'}), '([-1, 0, 1], dtype=zs.dtype)\n', (7043, 7071), True, 'import numpy as np\n'), ((7804, 7840), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {'dtype': 'zs.dtype'}), '([-1, 0, 1], dtype=zs.dtype)\n', (7812, 7840), True, 'import numpy as np\n'), ((24081, 24112), 'numpy.array', 'np.array', (['c'], {'ndmin': '(1)', 'copy': '(True)'}), '(c, ndmin=1, copy=True)\n', (24089, 24112), True, 'import numpy as np\n'), ((24398, 24433), 'numpy.core.multiarray.normalize_axis_index', 'normalize_axis_index', (['iaxis', 'c.ndim'], {}), '(iaxis, c.ndim)\n', (24418, 24433), False, 'from numpy.core.multiarray import normalize_axis_index\n'), ((24478, 24502), 'numpy.moveaxis', 'np.moveaxis', (['c', 'iaxis', '(0)'], {}), '(c, iaxis, 0)\n', (24489, 24502), True, 'import numpy as np\n'), ((24930, 24954), 'numpy.moveaxis', 'np.moveaxis', (['c', '(0)', 'iaxis'], {}), '(c, 0, iaxis)\n', (24941, 24954), True, 'import numpy as np\n'), ((28269, 28300), 'numpy.array', 'np.array', (['c'], {'ndmin': '(1)', 'copy': '(True)'}), '(c, ndmin=1, copy=True)\n', (28277, 28300), True, 'import numpy as np\n'), ((28865, 28900), 
'numpy.core.multiarray.normalize_axis_index', 'normalize_axis_index', (['iaxis', 'c.ndim'], {}), '(iaxis, c.ndim)\n', (28885, 28900), False, 'from numpy.core.multiarray import normalize_axis_index\n'), ((28945, 28969), 'numpy.moveaxis', 'np.moveaxis', (['c', 'iaxis', '(0)'], {}), '(c, iaxis, 0)\n', (28956, 28969), True, 'import numpy as np\n'), ((29525, 29549), 'numpy.moveaxis', 'np.moveaxis', (['c', '(0)', 'iaxis'], {}), '(c, 0, iaxis)\n', (29536, 29549), True, 'import numpy as np\n'), ((32028, 32059), 'numpy.array', 'np.array', (['c'], {'ndmin': '(1)', 'copy': '(True)'}), '(c, ndmin=1, copy=True)\n', (32036, 32059), True, 'import numpy as np\n'), ((41442, 41468), 'numpy.empty', 'np.empty', (['dims'], {'dtype': 'dtyp'}), '(dims, dtype=dtyp)\n', (41450, 41468), True, 'import numpy as np\n'), ((41678, 41699), 'numpy.moveaxis', 'np.moveaxis', (['v', '(0)', '(-1)'], {}), '(v, 0, -1)\n', (41689, 41699), True, 'import numpy as np\n'), ((51396, 51427), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'c.dtype'}), '((n, n), dtype=c.dtype)\n', (51404, 51427), True, 'import numpy as np\n'), ((51556, 51568), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (51563, 51568), True, 'import numpy as np\n'), ((53318, 53331), 'numpy.linalg.eigvals', 'la.eigvals', (['m'], {}), '(m)\n', (53328, 53331), True, 'import numpy.linalg as la\n'), ((55264, 55279), 'numpy.asarray', 'np.asarray', (['deg'], {}), '(deg)\n', (55274, 55279), True, 'import numpy as np\n'), ((55599, 55617), 'numpy.dot', 'np.dot', (['m.T', 'yfunc'], {}), '(m.T, yfunc)\n', (55605, 55617), True, 'import numpy as np\n'), ((58181, 58190), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (58187, 58190), True, 'import numpy as np\n'), ((58796, 58825), 'numpy.linspace', 'np.linspace', (['(-np.pi)', '(0)', '_npts'], {}), '(-np.pi, 0, _npts)\n', (58807, 58825), True, 'import numpy as np\n'), ((58837, 58846), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (58843, 58846), True, 'import numpy as np\n'), ((61705, 61725), 
'numpy.array', 'np.array', (['chebdomain'], {}), '(chebdomain)\n', (61713, 61725), True, 'import numpy as np\n'), ((61739, 61759), 'numpy.array', 'np.array', (['chebdomain'], {}), '(chebdomain)\n', (61747, 61759), True, 'import numpy as np\n'), ((7082, 7102), 'numpy.arange', 'np.arange', (['(-n)', '(n + 1)'], {}), '(-n, n + 1)\n', (7091, 7102), True, 'import numpy as np\n'), ((7881, 7901), 'numpy.arange', 'np.arange', (['(-n)', '(n + 1)'], {}), '(-n, n + 1)\n', (7890, 7901), True, 'import numpy as np\n'), ((12015, 12035), 'numpy.array', 'np.array', (['[off, scl]'], {}), '([off, scl])\n', (12023, 12035), True, 'import numpy as np\n'), ((12061, 12076), 'numpy.array', 'np.array', (['[off]'], {}), '([off])\n', (12069, 12076), True, 'import numpy as np\n'), ((28384, 28398), 'numpy.iterable', 'np.iterable', (['k'], {}), '(k)\n', (28395, 28398), True, 'import numpy as np\n'), ((28706, 28719), 'numpy.ndim', 'np.ndim', (['lbnd'], {}), '(lbnd)\n', (28713, 28719), True, 'import numpy as np\n'), ((28784, 28796), 'numpy.ndim', 'np.ndim', (['scl'], {}), '(scl)\n', (28791, 28796), True, 'import numpy as np\n'), ((32181, 32194), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (32191, 32194), True, 'import numpy as np\n'), ((41343, 41375), 'numpy.array', 'np.array', (['x'], {'copy': '(False)', 'ndmin': '(1)'}), '(x, copy=False, ndmin=1)\n', (41351, 41375), True, 'import numpy as np\n'), ((51341, 51367), 'numpy.array', 'np.array', (['[[-c[0] / c[1]]]'], {}), '([[-c[0] / c[1]]])\n', (51349, 51367), True, 'import numpy as np\n'), ((53142, 53169), 'numpy.array', 'np.array', (['[]'], {'dtype': 'c.dtype'}), '([], dtype=c.dtype)\n', (53150, 53169), True, 'import numpy as np\n'), ((53205, 53229), 'numpy.array', 'np.array', (['[-c[0] / c[1]]'], {}), '([-c[0] / c[1]])\n', (53213, 53229), True, 'import numpy as np\n'), ((56816, 56829), 'numpy.ones', 'np.ones', (['ideg'], {}), '(ideg)\n', (56823, 56829), True, 'import numpy as np\n'), ((58106, 58151), 'numpy.linspace', 'np.linspace', 
(['(-np.pi)', '(0)', '_npts'], {'endpoint': '(False)'}), '(-np.pi, 0, _npts, endpoint=False)\n', (58117, 58151), True, 'import numpy as np\n'), ((5868, 5902), 'numpy.empty', 'np.empty', (['(dlen + 1)'], {'dtype': 'z1.dtype'}), '(dlen + 1, dtype=z1.dtype)\n', (5876, 5902), True, 'import numpy as np\n'), ((24655, 24698), 'numpy.empty', 'np.empty', (['((n,) + c.shape[1:])'], {'dtype': 'c.dtype'}), '((n,) + c.shape[1:], dtype=c.dtype)\n', (24663, 24698), True, 'import numpy as np\n'), ((29090, 29107), 'numpy.all', 'np.all', (['(c[0] == 0)'], {}), '(c[0] == 0)\n', (29096, 29107), True, 'import numpy as np\n'), ((29166, 29213), 'numpy.empty', 'np.empty', (['((n + 1,) + c.shape[1:])'], {'dtype': 'c.dtype'}), '((n + 1,) + c.shape[1:], dtype=c.dtype)\n', (29174, 29213), True, 'import numpy as np\n'), ((57415, 57431), 'numpy.sqrt', 'np.sqrt', (['(1.0 + x)'], {}), '(1.0 + x)\n', (57422, 57431), True, 'import numpy as np\n'), ((57433, 57449), 'numpy.sqrt', 'np.sqrt', (['(1.0 - x)'], {}), '(1.0 - x)\n', (57440, 57449), True, 'import numpy as np\n'), ((21761, 21789), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'c.dtype'}), '([1], dtype=c.dtype)\n', (21769, 21789), True, 'import numpy as np\n'), ((56770, 56795), 'numpy.arange', 'np.arange', (['(1)', '(2 * ideg)', '(2)'], {}), '(1, 2 * ideg, 2)\n', (56779, 56795), True, 'import numpy as np\n'), ((51455, 51467), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (51462, 51467), True, 'import numpy as np\n'), ((22040, 22060), 'numpy.convolve', 'np.convolve', (['prd', 'zs'], {}), '(prd, zs)\n', (22051, 22060), True, 'import numpy as np\n')] |
import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
# Make the shared test utilities (<repo>/common_python/tools.py) importable.
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Fixed seed so every process generates the identical synthetic dataset.
np.random.seed(2019102417)
_num_samples = 11
_sample_size = 7
# Standard-normal samples, cast to float32 before ingestion.
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
    """Return sample `index` as a 1-D float32 array of length `_sample_size`."""
    return _samples[index,:]
def num_samples():
    """Return the number of samples in the synthetic dataset."""
    return _num_samples
def sample_dims():
    """Return the sample dimensions as a 1-element tuple."""
    return (_sample_size,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        tuple: (trainer, model, data_reader, optimizer)

    """
    # Half the dataset per mini-batch.
    batch_size = num_samples() // 2
    return (lbann.Trainer(batch_size),
            construct_model(lbann),
            construct_data_reader(lbann),
            lbann.NoOptimizer())
def construct_model(lbann):
    """Construct LBANN model.

    Builds a model that evaluates the LBANN Variance layer in every
    (data layout, biased) configuration and checks each result against
    a NumPy reference value, plus a gradient-checking callback.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x_lbann = lbann.Sum(lbann.Reshape(lbann.Input(),
                                      dims=tools.str_list(_sample_size)),
                        lbann.WeightsLayer(weights=x_weights,
                                           dims=tools.str_list(_sample_size)))

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # One test case per (biased, layout) combination, in the same order
    # as the original hand-unrolled sections: both unbiased cases first.
    for biased in (False, True):
        for data_layout in ('data_parallel', 'model_parallel'):
            _append_variance_test(lbann, x_lbann, data_layout, biased,
                                  obj, metrics, callbacks)

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------
    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)

def _append_variance_test(lbann, x_lbann, data_layout, biased,
                          obj, metrics, callbacks):
    """Append one Variance-layer test case to the model being built.

    Adds the L2 norm of an LBANN Variance layer to `obj`, a metric for it
    to `metrics`, and a CallbackCheckMetric to `callbacks` that compares
    the metric against a NumPy reference computation over the dataset.

    Args:
        lbann (module): Module for LBANN Python frontend
        x_lbann: Input layer the Variance layer reads from.
        data_layout (str): 'data_parallel' or 'model_parallel'.
        biased (bool): Whether to use the biased variance estimator.
        obj (list): Objective-function terms (appended to).
        metrics (list): Model metrics (appended to).
        callbacks (list): Model callbacks (appended to).

    """
    # LBANN implementation.  Only pass `biased` when it is requested so the
    # unbiased calls match the layer's default behavior exactly.
    kwargs = {'biased': True} if biased else {}
    y = lbann.Variance(x_lbann, data_layout=data_layout, **kwargs)
    z = lbann.L2Norm2(y)
    obj.append(z)
    name = '{} layout, {}'.format(data_layout.replace('_', '-'),
                                  'biased' if biased else 'unbiased')
    metrics.append(lbann.Metric(z, name=name))

    # NumPy reference implementation.  np.cov on a 1-D vector returns its
    # (co)variance; `bias` selects the biased/unbiased estimator.
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        vals.append(tools.numpy_l2norm2(np.cov(x, bias=biased)))
    val = np.mean(vals)

    # Tolerance scaled by single-precision machine epsilon.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))
def construct_data_reader(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    """
    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    message = lbann.reader_pb2.DataReader()
    # Register an identical reader for each execution mode.
    for role in ('train', 'test'):
        message.reader.extend([
            tools.create_python_data_reader(
                lbann,
                current_file,
                'get_sample',
                'num_samples',
                'sample_dims',
                role
            )
        ])
    return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
    # Expose each generated test function at module scope so PyTest collects it.
    globals()[_test_func.__name__] = _test_func
| [
"tools.str_list",
"tools.create_python_data_reader",
"numpy.random.seed",
"os.path.realpath",
"os.path.dirname",
"tools.create_tests",
"numpy.finfo",
"numpy.mean",
"numpy.random.normal",
"numpy.cov",
"tools.numpy_l2norm2"
] | [((123, 149), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (139, 149), False, 'import os\n'), ((164, 193), 'os.path.dirname', 'os.path.dirname', (['current_file'], {}), '(current_file)\n', (179, 193), False, 'import os\n'), ((536, 562), 'numpy.random.seed', 'np.random.seed', (['(2019102417)'], {}), '(2019102417)\n', (550, 562), True, 'import numpy as np\n'), ((7219, 7265), 'tools.create_tests', 'tools.create_tests', (['setup_experiment', '__file__'], {}), '(setup_experiment, __file__)\n', (7237, 7265), False, 'import tools\n'), ((2736, 2749), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (2743, 2749), True, 'import numpy as np\n'), ((3578, 3591), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (3585, 3591), True, 'import numpy as np\n'), ((4425, 4438), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (4432, 4438), True, 'import numpy as np\n'), ((5275, 5288), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (5282, 5288), True, 'import numpy as np\n'), ((226, 254), 'os.path.dirname', 'os.path.dirname', (['current_dir'], {}), '(current_dir)\n', (241, 254), False, 'import os\n'), ((609, 660), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(_num_samples, _sample_size)'}), '(size=(_num_samples, _sample_size))\n', (625, 660), True, 'import numpy as np\n'), ((2646, 2667), 'numpy.cov', 'np.cov', (['x'], {'bias': '(False)'}), '(x, bias=False)\n', (2652, 2667), True, 'import numpy as np\n'), ((2680, 2702), 'tools.numpy_l2norm2', 'tools.numpy_l2norm2', (['y'], {}), '(y)\n', (2699, 2702), False, 'import tools\n'), ((3488, 3509), 'numpy.cov', 'np.cov', (['x'], {'bias': '(False)'}), '(x, bias=False)\n', (3494, 3509), True, 'import numpy as np\n'), ((3522, 3544), 'tools.numpy_l2norm2', 'tools.numpy_l2norm2', (['y'], {}), '(y)\n', (3541, 3544), False, 'import tools\n'), ((4336, 4356), 'numpy.cov', 'np.cov', (['x'], {'bias': '(True)'}), '(x, bias=True)\n', (4342, 4356), True, 'import numpy as np\n'), ((4369, 4391), 
'tools.numpy_l2norm2', 'tools.numpy_l2norm2', (['y'], {}), '(y)\n', (4388, 4391), False, 'import tools\n'), ((5186, 5206), 'numpy.cov', 'np.cov', (['x'], {'bias': '(True)'}), '(x, bias=True)\n', (5192, 5206), True, 'import numpy as np\n'), ((5219, 5241), 'tools.numpy_l2norm2', 'tools.numpy_l2norm2', (['y'], {}), '(y)\n', (5238, 5241), False, 'import tools\n'), ((2770, 2790), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2778, 2790), True, 'import numpy as np\n'), ((3612, 3632), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3620, 3632), True, 'import numpy as np\n'), ((4459, 4479), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (4467, 4479), True, 'import numpy as np\n'), ((5309, 5329), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (5317, 5329), True, 'import numpy as np\n'), ((6588, 6697), 'tools.create_python_data_reader', 'tools.create_python_data_reader', (['lbann', 'current_file', '"""get_sample"""', '"""num_samples"""', '"""sample_dims"""', '"""train"""'], {}), "(lbann, current_file, 'get_sample',\n 'num_samples', 'sample_dims', 'train')\n", (6619, 6697), False, 'import tools\n'), ((6819, 6927), 'tools.create_python_data_reader', 'tools.create_python_data_reader', (['lbann', 'current_file', '"""get_sample"""', '"""num_samples"""', '"""sample_dims"""', '"""test"""'], {}), "(lbann, current_file, 'get_sample',\n 'num_samples', 'sample_dims', 'test')\n", (6850, 6927), False, 'import tools\n'), ((1903, 1931), 'tools.str_list', 'tools.str_list', (['_sample_size'], {}), '(_sample_size)\n', (1917, 1931), False, 'import tools\n'), ((2032, 2060), 'tools.str_list', 'tools.str_list', (['_sample_size'], {}), '(_sample_size)\n', (2046, 2060), False, 'import tools\n')] |
from safety_multiagent_mujoco.mujoco_multi import MujocoMulti
import numpy as np
import time
def main():
    """Run random-action rollouts in a multi-agent MuJoCo environment.

    Builds the environment described by ``env_args``, then plays
    ``n_episodes`` episodes with uniformly random joint actions, printing
    the per-agent costs and the reward and rendering every step.
    """
    # Active configuration: 2-agent HalfCheetah, 3 joints per agent.
    # Previously exercised alternatives (see git history / comments in the
    # original): manyagent_swimmer 10x2, coupled_half_cheetah 1p1,
    # manyagent_ant 3x2, Hopper-v2 3x1, Humanoid-v2 9|8 and 17x1,
    # Ant-v2 2x4 / 2x4d / 4x2.
    env_args = {"scenario": "HalfCheetah-v2",
                "agent_conf": "2x3",
                "agent_obsk": 1,
                "episode_limit": 1000}
    env = MujocoMulti(env_args=env_args)
    env_info = env.get_env_info()
    n_actions = env_info["n_actions"]
    n_agents = env_info["n_agents"]
    n_episodes = 10
    for _ in range(n_episodes):
        env.reset()
        terminated = False
        while not terminated:
            # One random continuous action per agent; availability masks
            # are ignored for random play.
            actions = [np.random.uniform(-10, 0.0, n_actions)
                       for _ in range(n_agents)]
            get_obs, get_state, reward, dones, infos, get_avail_actions = env.step(actions)
            # Bug fix: `terminated` was never updated from the step
            # result, so the episode loop could not exit.  Stop once all
            # agents report done.
            terminated = bool(np.all(dones))
            cost_x = [[item['cost']] for item in infos]
            print("cost_x:", cost_x)
            print("reward:", reward)
            env.render()
    env.close()
if __name__ == "__main__":
main()
"""
infos[cost]: [{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,
'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0},
{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,
'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0},
{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,
'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}]
""" | [
"numpy.nonzero",
"numpy.random.uniform",
"safety_multiagent_mujoco.mujoco_multi.MujocoMulti"
] | [((1953, 1983), 'safety_multiagent_mujoco.mujoco_multi.MujocoMulti', 'MujocoMulti', ([], {'env_args': 'env_args'}), '(env_args=env_args)\n', (1964, 1983), False, 'from safety_multiagent_mujoco.mujoco_multi import MujocoMulti\n'), ((2552, 2590), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(0.0)', 'n_actions'], {}), '(-10, 0.0, n_actions)\n', (2569, 2590), True, 'import numpy as np\n'), ((2498, 2523), 'numpy.nonzero', 'np.nonzero', (['avail_actions'], {}), '(avail_actions)\n', (2508, 2523), True, 'import numpy as np\n')] |
# Copyright 2016 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.from __future__ import print_function
from functools import reduce
import unittest
import GPflow
import tensorflow as tf
import numpy as np
from GPflow import settings
# NumPy dtype matching GPflow's configured TensorFlow float type.
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
# Python 2/3 compatibility: prefer the C implementation of pickle when present.
try:
    import cPickle as pickle
except ImportError:
    import pickle
class NamingTests(unittest.TestCase):
    """Checks how Param objects resolve their names."""
    def test_unnamed(self):
        # A Param with no parent falls back to the placeholder name.
        param = GPflow.param.Param(1)
        self.assertTrue(param.name == 'unnamed')
    def test_bad_parent(self):
        # Manually pointing _parent at a model that does not actually own
        # the Param must make name resolution fail.
        param = GPflow.param.Param(1)
        model = GPflow.model.Model()
        param._parent = model  # do not do this.
        with self.assertRaises(ValueError):
            print(param.name)
class ParamTestsScalar(unittest.TestCase):
    """Tests for a Parameterized container holding a single scalar Param."""
    def setUp(self):
        # Fresh graph per test; self.m.p is the single scalar parameter.
        tf.reset_default_graph()
        self.m = GPflow.param.Parameterized()
        self.m.p = GPflow.param.Param(1.0)
    def testAssign(self):
        # Assigning a plain float re-wraps it as a Param.
        self.m.p = 2.0
        self.assertTrue(isinstance(self.m.p, GPflow.param.Param))
        self.assertTrue(self.m.get_free_state() == 2.0)
    def testValue(self):
        # make sure the correct value is returned
        self.m.p = 3.0
        self.assertTrue(isinstance(self.m.p.value, np.ndarray))
        # make sure assignment does not work
        with self.assertRaises(AttributeError):
            self.m.p.value = 2.53
        # make sure we get a copy
        self.assertFalse(self.m.p.value is self.m.p._array)
    def testReplacement(self):
        # Replacing a Param detaches the old one from the tree.
        old_p = self.m.p
        new_p = GPflow.param.Param(3.0)
        self.m.p = new_p
        # Parameterized instances should not have _needs_recompile
        self.assertFalse(hasattr(self.m, '_needs_recompile'))
        self.assertFalse(old_p.highest_parent is self.m)
    def testHighestParent(self):
        self.assertTrue(self.m.p.highest_parent is self.m)
    def testName(self):
        self.assertTrue(self.m.p.name == 'p')
    def testFixing(self):
        # Fixing the container fixes children; unfixing a child unfixes it.
        self.m.p.fixed = False
        self.m.fixed = True
        self.assertTrue(self.m.p.fixed)
        self.m.p.fixed = False
        self.assertFalse(self.m.fixed)
    def testFixedFreeState(self):
        # Fixed parameters are excluded from the free-state vector.
        self.assertTrue(len(self.m.get_free_state()) == 1)
        self.m.set_state(np.ones(1))
        self.m.fixed = True
        self.assertTrue(len(self.m.get_free_state()) == 0)
        self.m.set_state(np.ones(0))
    def testMakeTF(self):
        # make_tf_array returns the number of free-state entries consumed.
        x = tf.placeholder('float64')
        l = self.m.make_tf_array(x)
        self.assertTrue(l == 1)
        l = self.m.p.make_tf_array(x)
        self.assertTrue(l == 1)
    def testFreeState(self):
        # Round-trip a value through set_state/get_free_state.
        xx = self.m.get_free_state()
        self.assertTrue(np.allclose(xx, np.ones(1)))
        y = np.array([34.0], np_float_type)
        self.m.set_state(y)
        self.assertTrue(np.allclose(self.m.get_free_state(), y))
    def testFixed(self):
        self.m.p.fixed = True
        self.assertTrue(len(self.m.get_free_state()) == 0)
        self.assertTrue(self.m.make_tf_array(tf.placeholder(float_type)) == 0)
    def testRecompile(self):
        # Fixing a Param or setting a prior must trigger recompilation.
        self.m._needs_recompile = False
        self.m.p.fixed = True
        self.assertTrue(self.m._needs_recompile)
        self.m._needs_recompile = False
        self.m.p.prior = GPflow.priors.Gaussian(0, 1)
        self.assertTrue(self.m._needs_recompile)
    def testTFMode(self):
        # Inside tf_mode, Param attributes resolve to TF tensors.
        x = tf.placeholder('float64')
        self.m.make_tf_array(x)
        self.assertTrue(isinstance(self.m.p, GPflow.param.Param))
        with self.m.tf_mode():
            self.assertTrue(isinstance(self.m.p, tf.Tensor))
class ParamTestsDeeper(unittest.TestCase):
    """Tests for a Param nested three Parameterized levels deep (m.foo.bar.baz)."""
    def setUp(self):
        tf.reset_default_graph()
        self.m = GPflow.param.Parameterized()
        self.m.foo = GPflow.param.Parameterized()
        self.m.foo.bar = GPflow.param.Parameterized()
        self.m.foo.bar.baz = GPflow.param.Param(1.0)
    def testHighestParent(self):
        # highest_parent walks all the way up to the root container.
        self.assertTrue(self.m.foo.highest_parent is self.m)
        self.assertTrue(self.m.foo.bar.highest_parent is self.m)
        self.assertTrue(self.m.foo.bar.baz.highest_parent is self.m)
    def testReplacement(self):
        # Replacing a deep Param detaches the old one from the tree.
        old_p = self.m.foo.bar.baz
        new_p = GPflow.param.Param(3.0)
        self.m.foo.bar.baz = new_p
        # Parameterized instances should not have _needs_recompile
        self.assertFalse(hasattr(self.m, '_needs_recompile'))
        self.assertFalse(old_p.highest_parent is self.m)
    def testReplacement2(self):
        # Replacing an intermediate Parameterized re-parents its children.
        old_p = self.m.foo.bar
        new_p = GPflow.param.Parameterized()
        new_p.baz = GPflow.param.Param(3.0)
        self.m.foo.bar = new_p
        self.assertTrue(new_p.baz.highest_parent is self.m)
        self.assertFalse(old_p.highest_parent is self.m)
    def testName(self):
        self.assertTrue(self.m.foo.name == 'foo')
        self.assertTrue(self.m.foo.bar.name == 'bar')
        self.assertTrue(self.m.foo.bar.baz.name == 'baz')
    def testMakeTF(self):
        # Every level reports the same single free-state entry.
        x = tf.placeholder('float64')
        l = self.m.make_tf_array(x)
        self.assertTrue(l == 1)
        l = self.m.foo.make_tf_array(x)
        self.assertTrue(l == 1)
        l = self.m.foo.bar.make_tf_array(x)
        self.assertTrue(l == 1)
        l = self.m.foo.bar.baz.make_tf_array(x)
        self.assertTrue(l == 1)
    def testFreeState(self):
        # Round-trip a value through set_state/get_free_state.
        xx = self.m.get_free_state()
        self.assertTrue(np.allclose(xx, np.ones(1)))
        y = np.array([34.0], np_float_type)
        self.m.set_state(y)
        self.assertTrue(np.allclose(self.m.get_free_state(), y))
    def testFixed(self):
        self.m.foo.bar.baz.fixed = True
        self.assertTrue(len(self.m.get_free_state()) == 0)
    def testFixing(self):
        # Fixed-ness propagates both down from containers and up to them.
        self.m.fixed = False
        self.m.foo.bar.fixed = True
        self.assertTrue(self.m.fixed)
        self.assertTrue(self.m.foo.fixed)
        self.assertTrue(self.m.foo.bar.fixed)
        self.assertTrue(self.m.foo.bar.baz.fixed)
        self.m.foo.bar.baz.fixed = False
        self.assertFalse(self.m.fixed)
        self.assertFalse(self.m.foo.fixed)
        self.assertFalse(self.m.foo.bar.fixed)
        self.assertFalse(self.m.foo.bar.baz.fixed)
    def testRecompile(self):
        # Changing fixedness or priors deep in the tree triggers recompile.
        self.m._needs_recompile = False
        self.m.foo.bar.baz.fixed = True
        self.assertTrue(self.m._needs_recompile)
        self.m._needs_recompile = False
        self.m.foo.bar.baz.prior = GPflow.priors.Gaussian(0, 1)
        self.assertTrue(self.m._needs_recompile)
    def testTFMode(self):
        # Inside tf_mode, the deep Param resolves to a TF tensor.
        x = tf.placeholder('float64')
        self.m.make_tf_array(x)
        self.assertTrue(isinstance(self.m.foo.bar.baz, GPflow.param.Param))
        with self.m.tf_mode():
            self.assertTrue(isinstance(self.m.foo.bar.baz, tf.Tensor))
class ParamTestsWider(unittest.TestCase):
    """Tests on a flat Parameterized holding three Params of different sizes.

    The tree has a scalar (1 value), a vector (10 values) and a 3x3 matrix
    (9 values), giving 20 free values in total.
    """
    def setUp(self):
        tf.reset_default_graph()
        self.m = GPflow.param.Parameterized()
        self.m.foo = GPflow.param.Param(1.0)
        self.m.bar = GPflow.param.Param(np.arange(10))
        self.m.baz = GPflow.param.Param(np.random.randn(3, 3))
    def testHighestParent(self):
        # assertIs reports both operands on failure, unlike assertTrue(a is b).
        self.assertIs(self.m.foo.highest_parent, self.m)
        self.assertIs(self.m.bar.highest_parent, self.m)
        self.assertIs(self.m.baz.highest_parent, self.m)
    def testName(self):
        self.assertEqual(self.m.foo.name, 'foo')
        self.assertEqual(self.m.bar.name, 'bar')
        self.assertEqual(self.m.baz.name, 'baz')
    def testMakeTF(self):
        """Each node consumes as many placeholder entries as it has free values."""
        x = tf.placeholder('float64')
        self.assertEqual(self.m.make_tf_array(x), 20)
        self.assertEqual(self.m.foo.make_tf_array(x), 1)
        self.assertEqual(self.m.bar.make_tf_array(x), 10)
        self.assertEqual(self.m.baz.make_tf_array(x), 9)
    def testFreeState(self):
        xx = self.m.get_free_state()
        self.assertEqual(len(xx), 20)
        y = np.random.randn(20)
        self.m.set_state(y)
        self.assertTrue(np.allclose(self.m.get_free_state(), y))
    def testIndexParam(self):
        """get_param_index locates each Param's offset in the free state."""
        fs = self.m.get_free_state()
        for p in [self.m.foo, self.m.bar, self.m.baz]:
            index, found = self.m.get_param_index(p)
            self.assertTrue(found)
            self.assertTrue(fs[index] == p.get_free_state()[0])
    def testFixed(self):
        self.m.foo.fixed = True
        self.assertEqual(len(self.m.get_free_state()), 19)
        self.m.foo.fixed = False
        self.m.bar.fixed = True
        self.assertEqual(len(self.m.get_free_state()), 10)
    def testFixing(self):
        """The parent reports fixed iff every child Param is fixed."""
        self.m.fixed = False
        self.m.foo.fixed = True
        self.assertFalse(self.m.fixed)
        self.assertTrue(self.m.foo.fixed)
        self.assertFalse(self.m.bar.fixed)
        self.assertFalse(self.m.baz.fixed)
        self.m.bar.fixed = True
        self.m.baz.fixed = True
        self.assertTrue(self.m.fixed)
        self.assertTrue(self.m.foo.fixed)
        self.assertTrue(self.m.bar.fixed)
        self.assertTrue(self.m.baz.fixed)
    def testRecompile(self):
        """Fixing a Param or attaching a prior must trigger a recompile."""
        self.m._needs_recompile = False
        self.m.foo.fixed = True
        self.assertTrue(self.m._needs_recompile)
        self.m._needs_recompile = False
        self.m.bar.prior = GPflow.priors.Gaussian(0, 1)
        self.assertTrue(self.m._needs_recompile)
    def testTFMode(self):
        """Params resolve to tf.Tensor objects only inside tf_mode()."""
        x = tf.placeholder('float64')
        self.m.make_tf_array(x)
        self.assertTrue(all(isinstance(p, GPflow.param.Param)
                            for p in (self.m.foo, self.m.bar, self.m.baz)))
        with self.m.tf_mode():
            self.assertTrue(all(isinstance(p, tf.Tensor)
                                for p in (self.m.foo, self.m.bar, self.m.baz)))
class SingleParamterizedInvariantTest(unittest.TestCase):
    """
    A Parameterized instance may appear at most once in a model tree; these
    tests check that every way of introducing a duplicate reference raises.
    """
    def setUp(self):
        tf.reset_default_graph()
    def testSelfReference(self):
        """Assigning a Parameterized as its own child must raise."""
        node = GPflow.param.Parameterized()
        with self.assertRaises(ValueError):
            node.foo = node
    def testReferenceBelow(self):
        """Re-adding the root as a grandchild must raise."""
        root = GPflow.param.Parameterized()
        root.foo = GPflow.param.Parameterized()
        with self.assertRaises(ValueError):
            root.foo.bar = root
    def testReferenceAbove(self):
        """Re-adding a grandchild directly on the root must raise."""
        root = GPflow.param.Parameterized()
        root.foo = GPflow.param.Parameterized()
        root.foo.bar = GPflow.param.Parameterized()
        with self.assertRaises(ValueError):
            root.baz = root.foo.bar
    def testReferenceAccross(self):
        """Adding a node from one branch into a sibling branch must raise."""
        root = GPflow.param.Parameterized()
        root.foo = GPflow.param.Parameterized()
        root.foo.bar = GPflow.param.Parameterized()
        root.boo = GPflow.param.Parameterized()
        with self.assertRaises(ValueError):
            root.boo.far = root.foo.bar
    def testAddingToAnother(self):
        """The same Parameterized may live in two *different* trees."""
        first = GPflow.param.Parameterized()
        first.foo = GPflow.param.Parameterized()
        second = GPflow.param.Parameterized()
        second.foo = first.foo
    def testReassign(self):
        """Assigning the value a slot already holds is not a duplicate."""
        owner = GPflow.param.Parameterized()
        child = GPflow.param.Parameterized()
        owner.foo = child  # assign
        owner.foo = child  # reassign
class SingleParamInvariantTest(unittest.TestCase):
    """
    A Param instance may appear at most once in a model tree; these tests
    check that every way of introducing a duplicate reference raises.
    """
    def setUp(self):
        tf.reset_default_graph()
    def testReferenceBelow(self):
        """Re-adding a root-level Param further down the tree must raise."""
        root = GPflow.param.Parameterized()
        root.p = GPflow.param.Param(1)
        root.foo = GPflow.param.Parameterized()
        with self.assertRaises(ValueError):
            root.foo.p = root.p
    def testReferenceAbove(self):
        """Re-adding a nested Param on an ancestor must raise."""
        root = GPflow.param.Parameterized()
        root.foo = GPflow.param.Parameterized()
        root.foo.p = GPflow.param.Param(1)
        with self.assertRaises(ValueError):
            root.p = root.foo.p
    def testReferenceAccross(self):
        """Adding a Param from one branch into a sibling branch must raise."""
        root = GPflow.param.Parameterized()
        root.foo = GPflow.param.Parameterized()
        root.foo.p = GPflow.param.Param(1)
        root.bar = GPflow.param.Parameterized()
        with self.assertRaises(ValueError):
            root.bar.p = root.foo.p
    def testAddingToAnother(self):
        """The same Param may live in two *different* trees."""
        first = GPflow.param.Parameterized()
        first.foo = GPflow.param.Param(1)
        second = GPflow.param.Parameterized()
        second.foo = first.foo
    def testReassign(self):
        """Assigning the value a slot already holds is not a duplicate."""
        owner = GPflow.param.Parameterized()
        value = GPflow.param.Param(1)
        owner.foo = value  # assign
        owner.foo = value  # reassign
class TestParamList(unittest.TestCase):
    """Behaviour of the GPflow.param.ParamList container."""
    def test_construction(self):
        """Only a *list* of Param/Parameterized objects is accepted."""
        GPflow.param.ParamList([])
        GPflow.param.ParamList([GPflow.param.Param(1)])
        with self.assertRaises(AssertionError):
            GPflow.param.ParamList([GPflow.param.Param(1), 'stringsnotallowed'])
        with self.assertRaises(AssertionError):
            # tuples not valid in constuctor:
            GPflow.param.ParamList((GPflow.param.Param(1),))
        with self.assertRaises(AssertionError):
            # param objects not valid in constructor (must be in list)
            GPflow.param.ParamList(GPflow.param.Param(1))
    def test_naming(self):
        """List items are named after their position."""
        p1 = GPflow.param.Param(1.2)
        p2 = GPflow.param.Param(np.array([3.4, 5.6], np_float_type))
        GPflow.param.ParamList([p1, p2])
        self.assertEqual(p1.name, 'item0')
        self.assertEqual(p2.name, 'item1')
    def test_connected(self):
        """The list exposes the free state of all its members."""
        p1 = GPflow.param.Param(1.2)
        p2 = GPflow.param.Param(np.array([3.4, 5.6], np_float_type))
        plist = GPflow.param.ParamList([p1, p2])
        x = plist.get_free_state()
        x.sort()
        self.assertTrue(np.all(x == np.array([1.2, 3.4, 5.6], np_float_type)))
    def test_setitem(self):
        """__setitem__ assigns *values* into existing Params, not new Params."""
        p1 = GPflow.param.Param(1.2)
        p2 = GPflow.param.Param(np.array([3.4, 5.6], np_float_type))
        plist = GPflow.param.ParamList([p1, p2])
        plist[0] = 1.2
        self.assertTrue(p1._array == 1.2)
        plist[1] = np.array([1.1, 2.2], np_float_type)
        self.assertTrue(np.all(p2._array == np.array([1.1, 2.2], np_float_type)))
        with self.assertRaises(TypeError):
            # Only raw values may be assigned, never Param objects.
            plist[0] = GPflow.param.Param(12)
    def test_append(self):
        p1 = GPflow.param.Param(1.2)
        p2 = GPflow.param.Param(np.array([3.4, 5.6], np_float_type))
        plist = GPflow.param.ParamList([p1])
        plist.append(p2)
        self.assertIn(p2, plist.sorted_params)
        with self.assertRaises(AssertionError):
            plist.append('foo')
    def test_len(self):
        p1 = GPflow.param.Param(1.2)
        p2 = GPflow.param.Param(np.array([3.4, 5.6], np_float_type))
        plist = GPflow.param.ParamList([p1])
        plist.append(p2)
        self.assertEqual(len(plist), 2)
    def test_with_parameterized(self):
        pzd = GPflow.param.Parameterized()
        p = GPflow.param.Param(1.2)
        pzd.p = p
        plist = GPflow.param.ParamList([pzd])
        # test assignment:
        plist[0].p = 5
        self.assertTrue(plist.get_free_state() == 5)
        # test to make sure tf_mode get turned on and off
        self.assertFalse(pzd._tf_mode)
        with plist.tf_mode():
            self.assertTrue(pzd._tf_mode)
        self.assertFalse(pzd._tf_mode)
    def test_in_model(self):
        class Foo(GPflow.model.Model):
            def __init__(self):
                GPflow.model.Model.__init__(self)
                self.l = GPflow.param.ParamList([
                    GPflow.param.Param(1), GPflow.param.Param(12)])
            def build_likelihood(self):
                # `reduce` is no longer a builtin on Python 3; import it
                # explicitly (functools.reduce also exists on Python 2.6+).
                from functools import reduce
                return -reduce(tf.add, [tf.square(x) for x in self.l])
        m = Foo()
        self.assertEqual(m.get_free_state().size, 2)
        m.optimize(disp=False)
        atol = 1e-6 if np_float_type is np.float32 else 1e-8
        self.assertTrue(np.allclose(m.get_free_state(), 0., atol=atol))
class TestPickleAndDict(unittest.TestCase):
    """A model should survive a pickle round trip with identical parameters."""
    def setUp(self):
        rng = np.random.RandomState(0)
        X = rng.randn(10, 1)
        Y = rng.randn(10, 1)
        self.m = GPflow.gpr.GPR(X, Y, kern=GPflow.kernels.RBF(1))
    def test(self):
        # pickle and reload the model
        s1 = pickle.dumps(self.m)
        m1 = pickle.loads(s1)
        d1 = self.m.get_parameter_dict()
        d2 = m1.get_parameter_dict()
        for key, val in d1.items():
            # A bare `assert` is stripped under `python -O`; use the
            # unittest assertion so the check always runs and is reported.
            self.assertTrue(np.all(val == d2[key]))
class TestDictEmpty(unittest.TestCase):
    """Parameter-dict round trip on a model with no parameters."""
    def setUp(self):
        self.m = GPflow.model.Model()
    def test(self):
        d = self.m.get_parameter_dict()
        # len(d) is equivalent to len(d.keys()) without building a key view.
        self.assertEqual(len(d), 0)
        # Restoring an empty dict must be a no-op, not an error.
        self.m.set_parameter_dict(d)
class TestDictSimple(unittest.TestCase):
    """Parameter-dict round trip restores a model's free state."""
    def setUp(self):
        self.m = GPflow.model.Model()
        self.m.p1 = GPflow.param.Param(np.random.randn(3, 2))
        self.m.p2 = GPflow.param.Param(np.random.randn(10))
    def test(self):
        d = self.m.get_parameter_dict()
        self.assertEqual(len(d), 2)
        state1 = self.m.get_free_state().copy()
        # Corrupt the state, then restore it from the saved dict.
        self.m.set_state(state1 * 0)
        self.m.set_parameter_dict(d)
        self.assertTrue(np.all(state1 == self.m.get_free_state()))
class TestDictSVGP(unittest.TestCase):
    """Parameter-dict round trip restores an SVGP model's log likelihood."""
    def setUp(self):
        self.rng = np.random.RandomState(0)
        X = self.rng.randn(10, 1)
        Y = self.rng.randn(10, 1)
        Z = self.rng.randn(5, 1)
        self.m = GPflow.svgp.SVGP(X, Y, Z=Z, likelihood=GPflow.likelihoods.Gaussian(), kern=GPflow.kernels.RBF(1))
    def test(self):
        baseline = self.m.compute_log_likelihood()
        saved = self.m.get_parameter_dict()
        # Perturb the model with random state, then restore from the dict.
        self.m.set_state(self.rng.randn(self.m.get_free_state().size))
        perturbed = self.m.compute_log_likelihood()
        self.m.set_parameter_dict(saved)
        restored = self.m.compute_log_likelihood()
        self.assertFalse(np.allclose(baseline, perturbed))
        self.assertTrue(np.allclose(baseline, restored))
class TestFixWithPrior(unittest.TestCase):
    """
    A model with a fixed parameter that also carries a prior must still
    compile and optimize without error.
    """
    def test(self):
        model = GPflow.model.Model()
        model.p = GPflow.param.Param(1.0, GPflow.transforms.positive)
        model.pp = GPflow.param.Param(1.0, GPflow.transforms.positive)
        model.p.prior = GPflow.priors.Gamma(1, 1)
        model.pp.prior = GPflow.priors.Gamma(1, 1)
        model.p.fixed = True
        # A constant likelihood: optimization only exercises the machinery.
        model.build_likelihood = lambda: tf.zeros([1], tf.float64)
        model.optimize(disp=1, maxiter=10)
class TestRandomizeDefault(unittest.TestCase):
    """
    This tests that distributions can sample random values without priors
    """
    def test(self):
        np.random.seed(1)
        m = GPflow.model.Model()
        m.p = GPflow.param.Param(1.0)
        m.pp = GPflow.param.Param(1.0, GPflow.transforms.Log1pe())
        m.pf = GPflow.param.Param(1.0)
        m.pf.fixed = True
        m.pmd = GPflow.param.Param(np.ones((5, 2)))
        ltr = GPflow.transforms.LowerTriangular(1, 2).forward(np.ones(2 * 10))
        m.pmd2 = GPflow.param.Param(ltr, transform=GPflow.transforms.LowerTriangular(1, 2))
        # should work as (pseudo) random vals a.s. are not 1.0
        m.p.randomize()
        self.assertFalse(m.p.value == 1.0)
        # randomizing through a transform must respect its constraint (> 0)
        m.pp.randomize()
        self.assertFalse(m.pp.value == 1.0 or m.pp.value <= 0.0)
        # check if fixing works
        m.pf.randomize()
        self.assertTrue(m.pf.value == 1.0)
        m.pf.randomize(skipfixed=False)
        self.assertFalse(m.pf.value == 1.0)
        # check multidimensional
        pmd_shape = m.pmd.shape
        m.pmd.randomize()
        self.assertFalse(np.any(m.pmd.value == 1.0))
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(m.pmd.shape, pmd_shape)
        # check non size-preserving transform
        pmd2_shape = m.pmd2.shape
        m.pmd2.randomize()
        self.assertFalse(np.any(m.pmd2.value == 1.0))
        self.assertEqual(m.pmd2.shape, pmd2_shape)
class TestRandomizePrior(unittest.TestCase):
    """
    This tests that distributions can sample random values from priors
    """
    def test(self):
        np.random.seed(1)
        from inspect import getargspec
        m = GPflow.model.Model()
        m.p = GPflow.param.Param(1.0)
        m.pmd = GPflow.param.Param(np.eye(5), transform=GPflow.transforms.DiagMatrix())
        # Collect every concrete Prior subclass exported by GPflow.priors.
        priors = [obj for obj in GPflow.priors.__dict__.values() if
                  isinstance(obj, type) and
                  issubclass(obj, GPflow.priors.Prior) and
                  obj is not GPflow.priors.Prior]
        # The abstract base class itself cannot be sampled from.
        with self.assertRaises(NotImplementedError):
            m.p = 1.0
            m.p.prior = GPflow.priors.Prior()
            m.p.randomize()
        for prior in priors:
            # Fill every required constructor argument with 1.
            signature = getargspec(prior.__init__)
            params = {}
            if signature.defaults is not None:
                param_names = signature.args[:-len(signature.defaults)]
            else:
                param_names = signature.args
            for param in param_names:
                # String comparison must use `!=`, not `is not`: identity of
                # equal string literals is an implementation detail.
                if param not in params and param != 'self':
                    params[param] = 1.
            m.p = 1.0
            m.p.prior = prior(**params)
            m.pmd.prior = prior(**params)
            m.p.randomize()
            m.pmd.randomize()
            self.assertFalse(m.p.value == 1.0)
            self.assertFalse(np.any(m.pmd.value == np.ones(5)))
            self.assertTrue(m.pmd.value.shape == (5, 5))
class TestRandomizeFeedPriors(unittest.TestCase):
    """
    Test if standard randomize behavior can be overriden using
    distributions keyword.
    """
    def test(self):
        np.random.seed(1)
        model = GPflow.model.Model()
        model.p = GPflow.param.Param(1.0)
        # Feeding the abstract Prior base class still cannot sample.
        with self.assertRaises(NotImplementedError):
            model.p.randomize(distributions={model.p: GPflow.priors.Prior()})
        # A concrete distribution supplied this way overrides the default.
        model.p.randomize(distributions={model.p: GPflow.priors.Gaussian(0, 1)})
        self.assertFalse(model.p.value == 1.0)
class TestRandomizeHierarchical(unittest.TestCase):
    """
    This tests that models can randomize all contained parameters
    """
    def test(self):
        np.random.seed(1)
        model = GPflow.model.Model()
        model.p = GPflow.param.Param(1.0)
        model.p2 = GPflow.param.Param(1.0)
        model.m = GPflow.model.Model()
        model.m.p = GPflow.param.Param(1.0)
        model.m.p2 = GPflow.param.Param(1.0)
        model.p2.prior = GPflow.priors.Gaussian(0, 1)
        model.m.p2.prior = GPflow.priors.Gaussian(0, 1)
        model.randomize()
        # Every parameter moved: with and without a prior, at every depth.
        for param in (model.p, model.p2, model.m.p, model.m.p2):
            self.assertFalse(param.value == 1.0)
class TestScopes(unittest.TestCase):
    """TensorFlow ops built by the model carry informative name scopes."""
    def setUp(self):
        rng = np.random.RandomState(0)
        X = rng.randn(10, 1)
        k = GPflow.kernels.RBF(1)
        Y = rng.randn(10, 1)
        self.m = GPflow.gpr.GPR(X, Y, k)
        self.m._compile()
    def test_likelihood_name(self):
        """The likelihood op is scoped as '<model name>.build_likelihood'."""
        with self.m.tf_mode():
            with self.m._graph.as_default():
                likelihood = self.m.build_likelihood()
                expected_name = self.m.name + '.build_likelihood'
                self.assertTrue(expected_name in likelihood.name)
    def test_kern_name(self):
        """Kernel evaluations are scoped under 'kern.K'."""
        with self.m.tf_mode():
            with self.m._graph.as_default():
                gram = self.m.kern.K(self.m.X)
        self.assertTrue('kern.K' in gram.name)
if __name__ == "__main__":
    # Discover and run every TestCase in this module when executed directly.
    unittest.main()
| [
"numpy.random.seed",
"tensorflow.reset_default_graph",
"numpy.allclose",
"numpy.ones",
"GPflow.transforms.DiagMatrix",
"numpy.arange",
"GPflow.transforms.Log1pe",
"GPflow.priors.Prior",
"unittest.main",
"GPflow.priors.Gamma",
"GPflow.gpr.GPR",
"numpy.random.randn",
"numpy.random.RandomState"... | [((25049, 25064), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25062, 25064), False, 'import unittest\n'), ((1010, 1031), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (1028, 1031), False, 'import GPflow\n'), ((1121, 1142), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (1139, 1142), False, 'import GPflow\n'), ((1155, 1175), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (1173, 1175), False, 'import GPflow\n'), ((1361, 1385), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1383, 1385), True, 'import tensorflow as tf\n'), ((1403, 1431), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (1429, 1431), False, 'import GPflow\n'), ((1451, 1474), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (1469, 1474), False, 'import GPflow\n'), ((2107, 2130), 'GPflow.param.Param', 'GPflow.param.Param', (['(3.0)'], {}), '(3.0)\n', (2125, 2130), False, 'import GPflow\n'), ((2996, 3021), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""'], {}), "('float64')\n", (3010, 3021), True, 'import tensorflow as tf\n'), ((3295, 3326), 'numpy.array', 'np.array', (['[34.0]', 'np_float_type'], {}), '([34.0], np_float_type)\n', (3303, 3326), True, 'import numpy as np\n'), ((3829, 3857), 'GPflow.priors.Gaussian', 'GPflow.priors.Gaussian', (['(0)', '(1)'], {}), '(0, 1)\n', (3851, 3857), False, 'import GPflow\n'), ((3946, 3971), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""'], {}), "('float64')\n", (3960, 3971), True, 'import tensorflow as tf\n'), ((4236, 4260), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4258, 4260), True, 'import tensorflow as tf\n'), ((4278, 4306), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (4304, 4306), False, 'import GPflow\n'), ((4328, 4356), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], 
{}), '()\n', (4354, 4356), False, 'import GPflow\n'), ((4382, 4410), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (4408, 4410), False, 'import GPflow\n'), ((4440, 4463), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (4458, 4463), False, 'import GPflow\n'), ((4776, 4799), 'GPflow.param.Param', 'GPflow.param.Param', (['(3.0)'], {}), '(3.0)\n', (4794, 4799), False, 'import GPflow\n'), ((5101, 5129), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (5127, 5129), False, 'import GPflow\n'), ((5150, 5173), 'GPflow.param.Param', 'GPflow.param.Param', (['(3.0)'], {}), '(3.0)\n', (5168, 5173), False, 'import GPflow\n'), ((5548, 5573), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""'], {}), "('float64')\n", (5562, 5573), True, 'import tensorflow as tf\n'), ((6007, 6038), 'numpy.array', 'np.array', (['[34.0]', 'np_float_type'], {}), '([34.0], np_float_type)\n', (6015, 6038), True, 'import numpy as np\n'), ((6981, 7009), 'GPflow.priors.Gaussian', 'GPflow.priors.Gaussian', (['(0)', '(1)'], {}), '(0, 1)\n', (7003, 7009), False, 'import GPflow\n'), ((7098, 7123), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""'], {}), "('float64')\n", (7112, 7123), True, 'import tensorflow as tf\n'), ((7408, 7432), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7430, 7432), True, 'import tensorflow as tf\n'), ((7450, 7478), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (7476, 7478), False, 'import GPflow\n'), ((7500, 7523), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (7518, 7523), False, 'import GPflow\n'), ((8073, 8098), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""'], {}), "('float64')\n", (8087, 8098), True, 'import tensorflow as tf\n'), ((8508, 8527), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (8523, 8527), True, 'import numpy as np\n'), ((9843, 9871), 
'GPflow.priors.Gaussian', 'GPflow.priors.Gaussian', (['(0)', '(1)'], {}), '(0, 1)\n', (9865, 9871), False, 'import GPflow\n'), ((9960, 9985), 'tensorflow.placeholder', 'tf.placeholder', (['"""float64"""'], {}), "('float64')\n", (9974, 9985), True, 'import tensorflow as tf\n'), ((10503, 10527), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10525, 10527), True, 'import tensorflow as tf\n'), ((10666, 10694), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (10692, 10694), False, 'import GPflow\n'), ((10924, 10952), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (10950, 10952), False, 'import GPflow\n'), ((10969, 10997), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (10995, 10997), False, 'import GPflow\n'), ((11230, 11258), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11256, 11258), False, 'import GPflow\n'), ((11275, 11303), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11301, 11303), False, 'import GPflow\n'), ((11324, 11352), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11350, 11352), False, 'import GPflow\n'), ((11589, 11617), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11615, 11617), False, 'import GPflow\n'), ((11634, 11662), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11660, 11662), False, 'import GPflow\n'), ((11683, 11711), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11709, 11711), False, 'import GPflow\n'), ((11729, 11757), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (11755, 11757), False, 'import GPflow\n'), ((11979, 12007), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (12005, 12007), False, 'import GPflow\n'), ((12025, 12053), 'GPflow.param.Parameterized', 
'GPflow.param.Parameterized', ([], {}), '()\n', (12051, 12053), False, 'import GPflow\n'), ((12068, 12096), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (12094, 12096), False, 'import GPflow\n'), ((12258, 12286), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (12284, 12286), False, 'import GPflow\n'), ((12299, 12327), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (12325, 12327), False, 'import GPflow\n'), ((12575, 12599), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (12597, 12599), True, 'import tensorflow as tf\n'), ((12751, 12779), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (12777, 12779), False, 'import GPflow\n'), ((12794, 12815), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (12812, 12815), False, 'import GPflow\n'), ((12832, 12860), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (12858, 12860), False, 'import GPflow\n'), ((13087, 13115), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13113, 13115), False, 'import GPflow\n'), ((13132, 13160), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13158, 13160), False, 'import GPflow\n'), ((13179, 13200), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (13197, 13200), False, 'import GPflow\n'), ((13425, 13453), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13451, 13453), False, 'import GPflow\n'), ((13470, 13498), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13496, 13498), False, 'import GPflow\n'), ((13517, 13538), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (13535, 13538), False, 'import GPflow\n'), ((13556, 13584), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13582, 13584), False, 'import 
GPflow\n'), ((13795, 13823), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13821, 13823), False, 'import GPflow\n'), ((13841, 13862), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (13859, 13862), False, 'import GPflow\n'), ((13877, 13905), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (13903, 13905), False, 'import GPflow\n'), ((14067, 14095), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (14093, 14095), False, 'import GPflow\n'), ((14108, 14129), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (14126, 14129), False, 'import GPflow\n'), ((14273, 14299), 'GPflow.param.ParamList', 'GPflow.param.ParamList', (['[]'], {}), '([])\n', (14295, 14299), False, 'import GPflow\n'), ((14858, 14881), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.2)'], {}), '(1.2)\n', (14876, 14881), False, 'import GPflow\n'), ((14959, 14991), 'GPflow.param.ParamList', 'GPflow.param.ParamList', (['[p1, p2]'], {}), '([p1, p2])\n', (14981, 14991), False, 'import GPflow\n'), ((15124, 15147), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.2)'], {}), '(1.2)\n', (15142, 15147), False, 'import GPflow\n'), ((15229, 15261), 'GPflow.param.ParamList', 'GPflow.param.ParamList', (['[p1, p2]'], {}), '([p1, p2])\n', (15251, 15261), False, 'import GPflow\n'), ((15431, 15454), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.2)'], {}), '(1.2)\n', (15449, 15454), False, 'import GPflow\n'), ((15536, 15568), 'GPflow.param.ParamList', 'GPflow.param.ParamList', (['[p1, p2]'], {}), '([p1, p2])\n', (15558, 15568), False, 'import GPflow\n'), ((15647, 15682), 'numpy.array', 'np.array', (['[1.1, 2.2]', 'np_float_type'], {}), '([1.1, 2.2], np_float_type)\n', (15655, 15682), True, 'import numpy as np\n'), ((15892, 15915), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.2)'], {}), '(1.2)\n', (15910, 15915), False, 'import GPflow\n'), ((15997, 16025), 'GPflow.param.ParamList', 
'GPflow.param.ParamList', (['[p1]'], {}), '([p1])\n', (16019, 16025), False, 'import GPflow\n'), ((16209, 16232), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.2)'], {}), '(1.2)\n', (16227, 16232), False, 'import GPflow\n'), ((16314, 16342), 'GPflow.param.ParamList', 'GPflow.param.ParamList', (['[p1]'], {}), '([p1])\n', (16336, 16342), False, 'import GPflow\n'), ((16455, 16483), 'GPflow.param.Parameterized', 'GPflow.param.Parameterized', ([], {}), '()\n', (16481, 16483), False, 'import GPflow\n'), ((16496, 16519), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.2)'], {}), '(1.2)\n', (16514, 16519), False, 'import GPflow\n'), ((16550, 16579), 'GPflow.param.ParamList', 'GPflow.param.ParamList', (['[pzd]'], {}), '([pzd])\n', (16572, 16579), False, 'import GPflow\n'), ((17580, 17604), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (17601, 17604), True, 'import numpy as np\n'), ((17801, 17821), 'pickle.dumps', 'pickle.dumps', (['self.m'], {}), '(self.m)\n', (17813, 17821), False, 'import pickle\n'), ((17835, 17851), 'pickle.loads', 'pickle.loads', (['s1'], {}), '(s1)\n', (17847, 17851), False, 'import pickle\n'), ((18089, 18109), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (18107, 18109), False, 'import GPflow\n'), ((18333, 18353), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (18351, 18353), False, 'import GPflow\n'), ((18851, 18875), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (18872, 18875), True, 'import numpy as np\n'), ((19758, 19778), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (19776, 19778), False, 'import GPflow\n'), ((19793, 19844), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)', 'GPflow.transforms.positive'], {}), '(1.0, GPflow.transforms.positive)\n', (19811, 19844), False, 'import GPflow\n'), ((19860, 19911), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)', 'GPflow.transforms.positive'], {}), '(1.0, 
GPflow.transforms.positive)\n', (19878, 19911), False, 'import GPflow\n'), ((19932, 19957), 'GPflow.priors.Gamma', 'GPflow.priors.Gamma', (['(1)', '(1)'], {}), '(1, 1)\n', (19951, 19957), False, 'import GPflow\n'), ((19979, 20004), 'GPflow.priors.Gamma', 'GPflow.priors.Gamma', (['(1)', '(1)'], {}), '(1, 1)\n', (19998, 20004), False, 'import GPflow\n'), ((20299, 20316), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (20313, 20316), True, 'import numpy as np\n'), ((20329, 20349), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (20347, 20349), False, 'import GPflow\n'), ((20364, 20387), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (20382, 20387), False, 'import GPflow\n'), ((20470, 20493), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (20488, 20493), False, 'import GPflow\n'), ((21715, 21732), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (21729, 21732), True, 'import numpy as np\n'), ((21785, 21805), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (21803, 21805), False, 'import GPflow\n'), ((21820, 21843), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (21838, 21843), False, 'import GPflow\n'), ((23256, 23273), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (23270, 23273), True, 'import numpy as np\n'), ((23286, 23306), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (23304, 23306), False, 'import GPflow\n'), ((23321, 23344), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (23339, 23344), False, 'import GPflow\n'), ((23749, 23766), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (23763, 23766), True, 'import numpy as np\n'), ((23779, 23799), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (23797, 23799), False, 'import GPflow\n'), ((23814, 23837), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (23832, 23837), False, 'import 
GPflow\n'), ((23853, 23876), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (23871, 23876), False, 'import GPflow\n'), ((23891, 23911), 'GPflow.model.Model', 'GPflow.model.Model', ([], {}), '()\n', (23909, 23911), False, 'import GPflow\n'), ((23928, 23951), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (23946, 23951), False, 'import GPflow\n'), ((23969, 23992), 'GPflow.param.Param', 'GPflow.param.Param', (['(1.0)'], {}), '(1.0)\n', (23987, 23992), False, 'import GPflow\n'), ((24015, 24043), 'GPflow.priors.Gaussian', 'GPflow.priors.Gaussian', (['(0)', '(1)'], {}), '(0, 1)\n', (24037, 24043), False, 'import GPflow\n'), ((24067, 24095), 'GPflow.priors.Gaussian', 'GPflow.priors.Gaussian', (['(0)', '(1)'], {}), '(0, 1)\n', (24089, 24095), False, 'import GPflow\n'), ((24371, 24395), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (24392, 24395), True, 'import numpy as np\n'), ((24437, 24458), 'GPflow.kernels.RBF', 'GPflow.kernels.RBF', (['(1)'], {}), '(1)\n', (24455, 24458), False, 'import GPflow\n'), ((24505, 24528), 'GPflow.gpr.GPR', 'GPflow.gpr.GPR', (['X', 'Y', 'k'], {}), '(X, Y, k)\n', (24519, 24528), False, 'import GPflow\n'), ((2821, 2831), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2828, 2831), True, 'import numpy as np\n'), ((2945, 2955), 'numpy.ones', 'np.ones', (['(0)'], {}), '(0)\n', (2952, 2955), True, 'import numpy as np\n'), ((7564, 7577), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7573, 7577), True, 'import numpy as np\n'), ((7619, 7640), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (7634, 7640), True, 'import numpy as np\n'), ((14914, 14949), 'numpy.array', 'np.array', (['[3.4, 5.6]', 'np_float_type'], {}), '([3.4, 5.6], np_float_type)\n', (14922, 14949), True, 'import numpy as np\n'), ((15180, 15215), 'numpy.array', 'np.array', (['[3.4, 5.6]', 'np_float_type'], {}), '([3.4, 5.6], np_float_type)\n', (15188, 15215), True, 
'import numpy as np\n'), ((15487, 15522), 'numpy.array', 'np.array', (['[3.4, 5.6]', 'np_float_type'], {}), '([3.4, 5.6], np_float_type)\n', (15495, 15522), True, 'import numpy as np\n'), ((15828, 15850), 'GPflow.param.Param', 'GPflow.param.Param', (['(12)'], {}), '(12)\n', (15846, 15850), False, 'import GPflow\n'), ((15948, 15983), 'numpy.array', 'np.array', (['[3.4, 5.6]', 'np_float_type'], {}), '([3.4, 5.6], np_float_type)\n', (15956, 15983), True, 'import numpy as np\n'), ((16265, 16300), 'numpy.array', 'np.array', (['[3.4, 5.6]', 'np_float_type'], {}), '([3.4, 5.6], np_float_type)\n', (16273, 16300), True, 'import numpy as np\n'), ((17986, 18008), 'numpy.all', 'np.all', (['(val == d2[key])'], {}), '(val == d2[key])\n', (17992, 18008), True, 'import numpy as np\n'), ((18393, 18414), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)'], {}), '(3, 2)\n', (18408, 18414), True, 'import numpy as np\n'), ((18455, 18474), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (18470, 18474), True, 'import numpy as np\n'), ((19493, 19522), 'numpy.allclose', 'np.allclose', (['loglik1', 'loglik2'], {}), '(loglik1, loglik2)\n', (19504, 19522), True, 'import numpy as np\n'), ((19548, 19577), 'numpy.allclose', 'np.allclose', (['loglik1', 'loglik3'], {}), '(loglik1, loglik3)\n', (19559, 19577), True, 'import numpy as np\n'), ((20067, 20092), 'tensorflow.zeros', 'tf.zeros', (['[1]', 'tf.float64'], {}), '([1], tf.float64)\n', (20075, 20092), True, 'import tensorflow as tf\n'), ((20427, 20453), 'GPflow.transforms.Log1pe', 'GPflow.transforms.Log1pe', ([], {}), '()\n', (20451, 20453), False, 'import GPflow\n'), ((20556, 20571), 'numpy.ones', 'np.ones', (['(5, 2)'], {}), '((5, 2))\n', (20563, 20571), True, 'import numpy as np\n'), ((20634, 20649), 'numpy.ones', 'np.ones', (['(2 * 10)'], {}), '(2 * 10)\n', (20641, 20649), True, 'import numpy as np\n'), ((21262, 21288), 'numpy.any', 'np.any', (['(m.pmd.value == 1.0)'], {}), '(m.pmd.value == 1.0)\n', (21268, 21288), 
True, 'import numpy as np\n'), ((21472, 21499), 'numpy.any', 'np.any', (['(m.pmd2.value == 1.0)'], {}), '(m.pmd2.value == 1.0)\n', (21478, 21499), True, 'import numpy as np\n'), ((21879, 21888), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (21885, 21888), True, 'import numpy as np\n'), ((22254, 22275), 'GPflow.priors.Prior', 'GPflow.priors.Prior', ([], {}), '()\n', (22273, 22275), False, 'import GPflow\n'), ((22358, 22384), 'inspect.getargspec', 'getargspec', (['prior.__init__'], {}), '(prior.__init__)\n', (22368, 22384), False, 'from inspect import getargspec\n'), ((3269, 3279), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (3276, 3279), True, 'import numpy as np\n'), ((5981, 5991), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (5988, 5991), True, 'import numpy as np\n'), ((14332, 14353), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (14350, 14353), False, 'import GPflow\n'), ((14794, 14815), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (14812, 14815), False, 'import GPflow\n'), ((16998, 17031), 'GPflow.model.Model.__init__', 'GPflow.model.Model.__init__', (['self'], {}), '(self)\n', (17025, 17031), False, 'import GPflow\n'), ((17706, 17727), 'GPflow.kernels.RBF', 'GPflow.kernels.RBF', (['(1)'], {}), '(1)\n', (17724, 17727), False, 'import GPflow\n'), ((19033, 19062), 'GPflow.likelihoods.Gaussian', 'GPflow.likelihoods.Gaussian', ([], {}), '()\n', (19060, 19062), False, 'import GPflow\n'), ((19069, 19090), 'GPflow.kernels.RBF', 'GPflow.kernels.RBF', (['(1)'], {}), '(1)\n', (19087, 19090), False, 'import GPflow\n'), ((20587, 20626), 'GPflow.transforms.LowerTriangular', 'GPflow.transforms.LowerTriangular', (['(1)', '(2)'], {}), '(1, 2)\n', (20620, 20626), False, 'import GPflow\n'), ((20702, 20741), 'GPflow.transforms.LowerTriangular', 'GPflow.transforms.LowerTriangular', (['(1)', '(2)'], {}), '(1, 2)\n', (20735, 20741), False, 'import GPflow\n'), ((21900, 21930), 'GPflow.transforms.DiagMatrix', 
'GPflow.transforms.DiagMatrix', ([], {}), '()\n', (21928, 21930), False, 'import GPflow\n'), ((21966, 21997), 'GPflow.priors.__dict__.values', 'GPflow.priors.__dict__.values', ([], {}), '()\n', (21995, 21997), False, 'import GPflow\n'), ((3580, 3606), 'tensorflow.placeholder', 'tf.placeholder', (['float_type'], {}), '(float_type)\n', (3594, 3606), True, 'import tensorflow as tf\n'), ((14440, 14461), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (14458, 14461), False, 'import GPflow\n'), ((14615, 14636), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (14633, 14636), False, 'import GPflow\n'), ((15346, 15386), 'numpy.array', 'np.array', (['[1.2, 3.4, 5.6]', 'np_float_type'], {}), '([1.2, 3.4, 5.6], np_float_type)\n', (15354, 15386), True, 'import numpy as np\n'), ((15727, 15762), 'numpy.array', 'np.array', (['[1.1, 2.2]', 'np_float_type'], {}), '([1.1, 2.2], np_float_type)\n', (15735, 15762), True, 'import numpy as np\n'), ((23510, 23538), 'GPflow.priors.Gaussian', 'GPflow.priors.Gaussian', (['(0)', '(1)'], {}), '(0, 1)\n', (23532, 23538), False, 'import GPflow\n'), ((17102, 17123), 'GPflow.param.Param', 'GPflow.param.Param', (['(1)'], {}), '(1)\n', (17120, 17123), False, 'import GPflow\n'), ((17125, 17147), 'GPflow.param.Param', 'GPflow.param.Param', (['(12)'], {}), '(12)\n', (17143, 17147), False, 'import GPflow\n'), ((23000, 23010), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (23007, 23010), True, 'import numpy as np\n'), ((23444, 23465), 'GPflow.priors.Prior', 'GPflow.priors.Prior', ([], {}), '()\n', (23463, 23465), False, 'import GPflow\n'), ((17231, 17243), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (17240, 17243), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
普量学院量化投资课程系列案例源码包
普量学院版权所有
仅用于教学目的,严禁转发和用于盈利目的,违者必究
©Plouto-Quants All Rights Reserved
普量学院助教微信:niuxiaomi3
"""
from pymongo import ASCENDING, DESCENDING
from database import DB_CONN
from datetime import datetime, timedelta
import tushare as ts
import numpy as np
import pandas as pd
def compute_drawdown(net_values):
    """Compute the maximum drawdown of a net-value series.

    The maximum drawdown is the largest relative drop from any peak to a
    subsequent trough, i.e. max over i <= j of 1 - net_values[j] / net_values[i].

    :param net_values: sequence of portfolio net values (assumed positive)
    :return: maximum drawdown as a fraction in [0, 1); 0 for empty input
    """
    max_drawdown = 0
    peak = float("-inf")
    # Single pass: track the running peak and the worst drop below it.
    # Replaces the original O(n^2) pairwise scan with an O(n) sweep that
    # yields the same result.
    for value in net_values:
        if value > peak:
            peak = value
        drawdown = 1 - value / peak
        if drawdown > max_drawdown:
            max_drawdown = drawdown
    return max_drawdown
def dynamic_max_drawdown(net_value):
    """Compute the running (dynamic) drawdown of a net-value series.

    For every position i the drawdown is measured against the highest net
    value seen up to and including i: (peak - value) / peak, and 0 when the
    series sits at a new high.

    Fixes two defects of the original implementation: ``net_value[:i]``
    excluded the current point, so (a) the very first element took the max of
    an empty slice (NaN), and (b) a new high reported a bogus positive
    drawdown because ``abs`` hid the sign error.

    :param net_value: indexable sequence of net values (list/ndarray/Series)
    :return: pd.Series of drawdown fractions, indexed 0..len(net_value)-1
    """
    drawdowns = pd.Series(dtype=float)
    peak = float("-inf")
    for i in range(len(net_value)):
        value = net_value[i]
        if value > peak:
            peak = value
        # At a (possibly tied) new high the drawdown is 0 by definition.
        drawdowns.loc[i] = 0.0 if value >= peak else (peak - value) / peak
    return drawdowns
def compute_annual_profit(trading_days, net_value):
    """Annualized return in percent, rounded to two decimals.

    A year is assumed to contain 245 trading days.

    :param trading_days: number of trading days the strategy ran
    :param net_value: final net value (1.0 = break even)
    :return: annualized return in percent; 0 when trading_days <= 0
    """
    if trading_days <= 0:
        return 0
    years = trading_days / 245
    # Geometric annualization: net_value^(1/years) - 1.
    rate = net_value ** (1 / years) - 1
    return np.round(rate * 100, 2)
def compute_sharpe_ratio(net_value, df_day_profit):
    """Compute the annualized return and the Sharpe ratio.

    :param net_value: final net value of the strategy
    :param df_day_profit: daily returns, one row per trading day, with a
        'profit' column (strategy daily return); a 'hs300' column may be
        present but is not used here
    :return: tuple (annual_profit, sharpe_ratio)
    """
    # Number of trading days == number of rows.
    trading_days = df_day_profit.index.size
    # Standard deviation of the daily returns (rounded as reported).
    profit_std = np.round(df_day_profit['profit'].std(), 4)
    annual_profit = compute_annual_profit(trading_days, net_value)
    # Sharpe = (annualized return - risk-free rate) / annualized volatility.
    # 4.75 is the assumed annual risk-free rate in percent; sqrt(245)
    # annualizes the daily volatility.  (Removed a stray debug print of
    # profit_std left over from development.)
    sharpe_ratio = (annual_profit - 4.75) / (profit_std * pow(245, 1 / 2))
    return annual_profit, sharpe_ratio
def compute_ir(df_day_profit):
"""
计算信息率
:param df_day_profit: 单日收益,profit - 策略收益 hs300 - 沪深300的
:return: 信息率
"""
# 计算单日的无风险收益率
base_profit = 4.5 / 245
df_extra_profit = pd.DataFrame(columns=['profit', 'hs300'])
df_extra_profit['profit'] = df_day_profit['profit'] - base_profit
df_extra_profit['hs300'] = df_day_profit['hs300'] - base_profit
# 计算策略的单日收益和基准单日涨跌幅的协方差
cov = df_extra_profit['profit'].cov(df_extra_profit['hs300'])
# 计算策略收益和基准收益沪深300的方差
var_profit = df_extra_profit['profit'].var()
var_hs300 = df_extra_profit['hs300'].var()
# 计算Beta
beta = cov / var_hs300
# 残差风险
omega = pow((var_profit - pow(beta, 2) * var_hs300) * 245, 1/2)
# Alpha
alpha = (df_extra_profit['profit'].mean() - (beta * df_extra_profit['hs300'].mean())) * 245
# 信息率
ir = np.round(alpha / omega, 4)
print('cov:%10.4f,var_profit:%10.4f,var_hs300:%10.4f,beta:%10.4f,omega:%10.4f,alpha:%10.4f,ir:%10.4f' %
(cov, var_profit, var_hs300, beta, omega, alpha, ir), flush=True)
return ir
def get_trading_dates(begin_date=None, end_date=None):
    """Return the trading days between begin_date and end_date, ascending.

    If no range is given, the last 365 calendar days ending today are used.

    :param begin_date: start date, 'YYYY-MM-DD'; default: 365 days ago
    :param end_date: end date, 'YYYY-MM-DD'; default: today
    :return: list of trading-day date strings
    """
    today = datetime.now()
    if begin_date is None:
        begin_date = (today - timedelta(days=365)).strftime('%Y-%m-%d')
    if end_date is None:
        end_date = today.strftime('%Y-%m-%d')
    # NOTE: the local daily collection could answer this query offline, but it
    # only covers previously-downloaded dates, so we consult tushare's full
    # trading calendar instead.
    calendar = ts.trade_cal()
    in_range = ((calendar.isOpen == 1)
                & (calendar.calendarDate >= begin_date)
                & (calendar.calendarDate <= end_date))
    return calendar[in_range].calendarDate.tolist()
def get_all_codes(date=None):
    """Return all stock codes available on a trading day.

    If *date* is omitted, or the requested day has no data, walk backwards
    one calendar day at a time from today until a day with data is found.

    :param date: day to query, 'YYYY-MM-DD'
    :return: list of stock-code strings
    """
    probe_day = datetime.now()
    if date is None:
        date = probe_day.strftime('%Y-%m-%d')
    codes = []
    while not codes:
        cursor = DB_CONN.basic.find(
            {'date': date},
            projection={'code': True, '_id': False})
        codes = [doc['code'] for doc in cursor]
        # Prepare the previous calendar day for the next attempt.
        probe_day -= timedelta(days=1)
        date = probe_day.strftime('%Y-%m-%d')
    return codes
| [
"pandas.DataFrame",
"tushare.trade_cal",
"datetime.datetime.now",
"datetime.timedelta",
"pandas.Series",
"numpy.round",
"database.DB_CONN.basic.find"
] | [((844, 855), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (853, 855), True, 'import pandas as pd\n'), ((1348, 1380), 'numpy.round', 'np.round', (['(annual_profit * 100)', '(2)'], {}), '(annual_profit * 100, 2)\n', (1356, 1380), True, 'import numpy as np\n'), ((2147, 2188), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['profit', 'hs300']"}), "(columns=['profit', 'hs300'])\n", (2159, 2188), True, 'import pandas as pd\n'), ((2790, 2816), 'numpy.round', 'np.round', (['(alpha / omega)', '(4)'], {}), '(alpha / omega, 4)\n', (2798, 2816), True, 'import numpy as np\n'), ((3264, 3278), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3276, 3278), False, 'from datetime import datetime, timedelta\n'), ((3901, 3915), 'tushare.trade_cal', 'ts.trade_cal', ([], {}), '()\n', (3913, 3915), True, 'import tushare as ts\n'), ((4392, 4406), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4404, 4406), False, 'from datetime import datetime, timedelta\n'), ((4542, 4617), 'database.DB_CONN.basic.find', 'DB_CONN.basic.find', (["{'date': date}"], {'projection': "{'code': True, '_id': False}"}), "({'date': date}, projection={'code': True, '_id': False})\n", (4560, 4617), False, 'from database import DB_CONN\n'), ((3335, 3354), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (3344, 3354), False, 'from datetime import datetime, timedelta\n'), ((4732, 4749), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4741, 4749), False, 'from datetime import datetime, timedelta\n')] |
"""
============================
Typing (:mod:`numpy.typing`)
============================
.. warning::
Some of the types in this module rely on features only present in
the standard library in Python 3.8 and greater. If you want to use
these types in earlier versions of Python, you should install the
typing-extensions_ package.
Large parts of the NumPy API have PEP-484-style type annotations. In
addition a number of type aliases are available to users, most prominently
the two below:
- `ArrayLike`: objects that can be converted to arrays
- `DTypeLike`: objects that can be converted to dtypes
.. _typing-extensions: https://pypi.org/project/typing-extensions/
Differences from the runtime NumPy API
--------------------------------------
NumPy is very flexible. Trying to describe the full range of
possibilities statically would result in types that are not very
helpful. For that reason, the typed NumPy API is often stricter than
the runtime NumPy API. This section describes some notable
differences.
ArrayLike
~~~~~~~~~
The `ArrayLike` type tries to avoid creating object arrays. For
example,
.. code-block:: python
>>> np.array(x**2 for x in range(10))
array(<generator object <genexpr> at ...>, dtype=object)
is valid NumPy code which will create a 0-dimensional object
array. Type checkers will complain about the above example when using
the NumPy types however. If you really intended to do the above, then
you can either use a ``# type: ignore`` comment:
.. code-block:: python
>>> np.array(x**2 for x in range(10)) # type: ignore
or explicitly type the array like object as `~typing.Any`:
.. code-block:: python
>>> from typing import Any
>>> array_like: Any = (x**2 for x in range(10))
>>> np.array(array_like)
array(<generator object <genexpr> at ...>, dtype=object)
ndarray
~~~~~~~
It's possible to mutate the dtype of an array at runtime. For example,
the following code is valid:
.. code-block:: python
>>> x = np.array([1, 2])
>>> x.dtype = np.bool_
This sort of mutation is not allowed by the types. Users who want to
write statically typed code should instead use the `numpy.ndarray.view`
method to create a view of the array with a different dtype.
DTypeLike
~~~~~~~~~
The `DTypeLike` type tries to avoid creation of dtype objects using
dictionary of fields like below:
.. code-block:: python
>>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
Although this is valid Numpy code, the type checker will complain about it,
since its usage is discouraged.
Please see : :ref:`Data type objects <arrays.dtypes>`
Number Precision
~~~~~~~~~~~~~~~~
The precision of `numpy.number` subclasses is treated as a covariant generic
parameter (see :class:`~NBitBase`), simplifying the annotating of processes
involving precision-based casting.
.. code-block:: python
>>> from typing import TypeVar
>>> import numpy as np
>>> import numpy.typing as npt
>>> T = TypeVar("T", bound=npt.NBitBase)
>>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
... ...
Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
runtime, they're not necessarily considered as sub-classes.
Timedelta64
~~~~~~~~~~~
The `~numpy.timedelta64` class is not considered a subclass of `~numpy.signedinteger`,
the former only inheriting from `~numpy.generic` while static type checking.
API
---
"""
# NOTE: The API section will be appended with additional entries
# further down in this file
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 8):
from typing import final
else:
from typing_extensions import final
else:
def final(f): return f
if not TYPE_CHECKING:
__all__ = ["ArrayLike", "DTypeLike", "NBitBase"]
else:
# Ensure that all objects within this module are accessible while
# static type checking. This includes private ones, as we need them
# for internal use.
#
# Declare to mypy that `__all__` is a list of strings without assigning
# an explicit value
__all__: List[str]
@final  # Disallow the creation of arbitrary `NBitBase` subclasses
class NBitBase:
    """
    An object representing `numpy.number` precision during static type checking.
    Used exclusively for the purpose of static type checking, `NBitBase`
    represents the base of a hierarchical set of subclasses.
    Each subsequent subclass is herein used for representing a lower level
    of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.
    Examples
    --------
    Below is a typical usage example: `NBitBase` is herein used for annotating a
    function that takes a float and integer of arbitrary precision as arguments
    and returns a new float of whichever precision is largest
    (*e.g.* ``np.float16 + np.int64 -> np.float64``).
    .. code-block:: python
        >>> from typing import TypeVar, TYPE_CHECKING
        >>> import numpy as np
        >>> import numpy.typing as npt
        >>> T = TypeVar("T", bound=npt.NBitBase)
        >>> def add(a: "np.floating[T]", b: "np.integer[T]") -> "np.floating[T]":
        ...     return a + b
        >>> a = np.float16()
        >>> b = np.int64()
        >>> out = add(a, b)
        >>> if TYPE_CHECKING:
        ...     reveal_locals()
        ...     # note: Revealed local types are:
        ...     # note: a: numpy.floating[numpy.typing._16Bit*]
        ...     # note: b: numpy.signedinteger[numpy.typing._64Bit*]
        ...     # note: out: numpy.floating[numpy.typing._64Bit*]
    """
    def __init_subclass__(cls) -> None:
        # Only the precision aliases defined in this module may subclass
        # `NBitBase`; anything else would defeat the @final contract above.
        allowed_names = {
            "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit",
            "_64Bit", "_32Bit", "_16Bit", "_8Bit",
        }
        if cls.__name__ not in allowed_names:
            raise TypeError('cannot inherit from final class "NBitBase"')
        super().__init_subclass__()
# Silence errors about subclassing a `@final`-decorated class.
# Concrete precision levels: each width subclasses the next-larger one, so a
# narrower type is accepted wherever a wider precision bound is expected.
class _256Bit(NBitBase): ...  # type: ignore[misc]
class _128Bit(_256Bit): ...  # type: ignore[misc]
class _96Bit(_128Bit): ...  # type: ignore[misc]
class _80Bit(_96Bit): ...  # type: ignore[misc]
class _64Bit(_80Bit): ...  # type: ignore[misc]
class _32Bit(_64Bit): ...  # type: ignore[misc]
class _16Bit(_32Bit): ...  # type: ignore[misc]
class _8Bit(_16Bit): ...  # type: ignore[misc]
# Clean up the namespace
del TYPE_CHECKING, final, List
from ._scalars import (
_CharLike,
_BoolLike,
_UIntLike,
_IntLike,
_FloatLike,
_ComplexLike,
_TD64Like,
_NumberLike,
_ScalarLike,
_VoidLike,
)
from ._array_like import _SupportsArray, ArrayLike
from ._shape import _Shape, _ShapeLike
from ._dtype_like import _SupportsDType, _VoidDTypeLike, DTypeLike
if __doc__ is not None:
from ._add_docstring import _docstrings
__doc__ += _docstrings
__doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n'
del _docstrings
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| [
"numpy._pytesttester.PytestTester"
] | [((7111, 7133), 'numpy._pytesttester.PytestTester', 'PytestTester', (['__name__'], {}), '(__name__)\n', (7123, 7133), False, 'from numpy._pytesttester import PytestTester\n')] |
import os
import re
import numpy as np
import csv
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from time import strftime
import pytesseract
import tensorflow as tf
def load_interpreter(model_path=None):
    """Build a TFLite interpreter with its tensors already allocated.

    :param model_path: path to a .tflite file; defaults to the bundled
        TableNet/DenseNet121 model file.
    :return: a ready-to-invoke tf.lite.Interpreter
    """
    path = model_path if model_path is not None else 'tablenet_densenet121_lite.tflite'
    interpreter = tf.lite.Interpreter(model_path=path)
    interpreter.allocate_tensors()
    return interpreter
def adjust(new_rows, maxi):
    """Pad every row, in place, with '-' until it has *maxi* fields.

    Makes the rows rectangular so they can be written as CSV.

    :param new_rows: list of rows (lists of strings); rows are mutated
    :param maxi: target field count per row
    :return: list containing the (padded) row objects
    """
    padded = []
    for row in new_rows:
        # range() is evaluated before extend mutates the row.
        row.extend('-' for _ in range(maxi - len(row)))
        padded.append(row)
    return padded
def text2csv(text):
    """Convert whitespace-delimited *text* into a CSV file on disk.

    Each line of *text* becomes one CSV row (runs of whitespace act as
    field separators), short rows are padded with '-', and a generic
    'column_i' header row is prepended.

    :param text: OCR output with newline-separated rows
    :return: path of the CSV file written under ./output/
    """
    raw_rows = [line.split() for line in text.split('\n')]
    width = 0
    for row in raw_rows:
        if len(row) > width:
            width = len(row)
    rows = adjust(raw_rows, width)
    header = ['column_{}'.format(i) for i in range(width)]
    stamp = strftime("%Y%m%d-%H%M")
    out_dir = 'output'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_file = os.path.join(out_dir, 'temp_{}.csv'.format(stamp))
    with open(out_file, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(rows)
    return out_file
def append_offset(name, offset):
    """Insert *offset* before the extension of filename *name*.

    Used to derive a non-colliding name when a file already exists.

    Example:
        append_offset('python.py', '2') -> 'python_2.py'

    Uses rsplit so names containing extra dots (e.g. 'archive.tar.gz')
    keep everything before the last dot; the original ``split('.')``
    raised ValueError for such names.

    :param name: filename containing at least one dot
    :param offset: string to append to the stem
    :return: filename with '_<offset>' inserted before the extension
    """
    fname, extension = name.rsplit('.', 1)
    return ''.join([fname, '_', offset, '.', extension])
def render(mask):
    """Collapse per-class scores to a single-channel label mask.

    :param mask: batched per-class model output
    :return: label mask for the first batch element, with a trailing
        channel axis of size 1
    """
    labels = tf.argmax(mask, axis=-1)
    labels = tf.expand_dims(labels, axis=-1)
    return labels[0]
def visualize(image):
    """Display *image* (the cropped table) in a large matplotlib figure."""
    plt.figure(figsize=(15, 15))
    plt.title('Cropped Table')
    plt.imshow(tf.keras.preprocessing.image.array_to_img(image))
    plt.axis('off')
    plt.show()
def final(img_path, output_dir='output', show_table=False):
    """Detect the table in an image, OCR it, and dump it to a CSV file.

    Pipeline: TFLite TableNet segmentation -> crop to the table's bounding
    box -> pytesseract OCR -> text2csv.

    :param img_path: path to the input image
    :param output_dir: currently unused -- text2csv always writes under
        ./output/ (NOTE(review): wire this through text2csv or drop it)
    :param show_table: when True also return the cropped table image
    :return: CSV file path, or (path, cropped_image) when show_table is True
    """
    interpreter = load_interpreter()
    image_orig = Image.open(img_path)
    original_dim = image_orig.size
    # The tflite model expects a 1x512x512 float32 input scaled to [0, 1].
    image = image_orig.resize((512, 512))
    np_image = np.asarray(image) / 255.0
    np_image = np.expand_dims(np_image.astype(np.float32), axis=0)
    ip_d = interpreter.get_input_details()[0]
    op_d = interpreter.get_output_details()[0]
    interpreter.set_tensor(ip_d['index'], np_image)
    interpreter.invoke()
    tab_mask = interpreter.get_tensor(op_d['index'])
    tab_mask = np.squeeze(render(tab_mask).numpy())
    # Scale the mask back to the original resolution before cropping.
    tab_mask = np.array(Image.fromarray(np.uint8(tab_mask)).resize(original_dim))
    image_orig = np.array(image_orig)
    x, y, w, h = cv2.boundingRect(tab_mask)
    tab = image_orig[y:y + h, x:x + w]
    text = pytesseract.image_to_string(tab).strip()
    text = re.sub("[\r\n]+", "\r\n", text)
    # Renamed from `csv` -- the original shadowed the imported csv module.
    csv_path = text2csv(text)
    if show_table:
        return csv_path, tab
    return csv_path
"matplotlib.pyplot.title",
"time.strftime",
"matplotlib.pyplot.figure",
"tensorflow.keras.preprocessing.image.array_to_img",
"os.path.exists",
"cv2.boundingRect",
"re.sub",
"numpy.uint8",
"matplotlib.pyplot.show",
"csv.writer",
"numpy.asarray",
"tensorflow.lite.Interpreter",
"os.makedirs",
... | [((410, 452), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'model_path'}), '(model_path=model_path)\n', (429, 452), True, 'import tensorflow as tf\n'), ((1395, 1418), 'time.strftime', 'strftime', (['"""%Y%m%d-%H%M"""'], {}), "('%Y%m%d-%H%M')\n", (1403, 1418), False, 'from time import strftime\n'), ((2345, 2369), 'tensorflow.argmax', 'tf.argmax', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (2354, 2369), True, 'import tensorflow as tf\n'), ((2450, 2478), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (2460, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2513, 2529), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2522, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2601, 2616), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2609, 2616), True, 'import matplotlib.pyplot as plt\n'), ((2622, 2632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2630, 2632), True, 'import matplotlib.pyplot as plt\n'), ((2760, 2780), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2770, 2780), False, 'from PIL import Image\n'), ((2959, 2991), 'numpy.expand_dims', 'np.expand_dims', (['np_image'], {'axis': '(0)'}), '(np_image, axis=0)\n', (2973, 2991), True, 'import numpy as np\n'), ((3409, 3427), 'numpy.array', 'np.array', (['tab_mask'], {}), '(tab_mask)\n', (3417, 3427), True, 'import numpy as np\n'), ((3450, 3470), 'numpy.array', 'np.array', (['image_orig'], {}), '(image_orig)\n', (3458, 3470), True, 'import numpy as np\n'), ((3489, 3515), 'cv2.boundingRect', 'cv2.boundingRect', (['tab_mask'], {}), '(tab_mask)\n', (3505, 3515), False, 'import cv2\n'), ((3570, 3602), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['tab'], {}), '(tab)\n', (3597, 3602), False, 'import pytesseract\n'), ((3640, 3671), 're.sub', 're.sub', (["'[\\r\\n]+'", "'\\r\\n'", 'text'], {}), "('[\\r\\n]+', '\\r\\n', text)\n", 
(3646, 3671), False, 'import re\n'), ((1456, 1480), 'os.path.exists', 'os.path.exists', (['temp_dir'], {}), '(temp_dir)\n', (1470, 1480), False, 'import os\n'), ((1491, 1512), 'os.makedirs', 'os.makedirs', (['temp_dir'], {}), '(temp_dir)\n', (1502, 1512), False, 'import os\n'), ((1641, 1654), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1651, 1654), False, 'import csv\n'), ((2546, 2594), 'tensorflow.keras.preprocessing.image.array_to_img', 'tf.keras.preprocessing.image.array_to_img', (['image'], {}), '(image)\n', (2587, 2594), True, 'import tensorflow as tf\n'), ((2875, 2892), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2885, 2892), True, 'import numpy as np\n'), ((3325, 3343), 'numpy.uint8', 'np.uint8', (['tab_mask'], {}), '(tab_mask)\n', (3333, 3343), True, 'import numpy as np\n')] |
import time
from multiprocessing.dummy import Pool as ThreadPool
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
import requests
import queue
table = queue.Queue()
failed_url = queue.Queue()
PROXY_POOL_URL = 'http://localhost:5555/random'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
}
def fetch(url):
    """Fetch *url* through a random proxy drawn from the local proxy pool.

    Returns the UTF-8 decoded response body on success, or None on any
    failure (proxy pool unreachable, non-200 status, timeout, ...).
    """
    try:
        # First ask the local pool service for a random proxy address.
        response = requests.get(PROXY_POOL_URL, timeout=100)
        assert response.status_code == 200
        proxy = response.text
        proxies = {'http': 'http://'+proxy, 'https': 'https://'+proxy}
        response = requests.get(url, headers=headers, proxies=proxies, timeout=50)
        assert response.status_code == 200
        response.encoding = 'utf-8'
        return response.text
    except:
        # NOTE(review): bare except silently swallows every error, including
        # KeyboardInterrupt -- consider narrowing to requests.RequestException.
        return None
def parser(html, movie):
    """Extract every user comment from a review page and enqueue result rows.

    On failure (no html, or no comment nodes found) the movie row is pushed
    to the global ``failed_url`` queue for a later retry; otherwise one row
    per comment -- the movie fields plus the comment text -- is pushed to
    the global ``table`` queue.

    :param html: page source returned by fetch(), or None
    :param movie: row (name, rating, subject_href, comment_nums,
        comment_href, label)
    """
    if not html:
        failed_url.put(movie)
        return None
    try:
        soup = BeautifulSoup(html, 'lxml')
        items = list(soup.find_all("div", class_="comment-item"))
        if len(items) == 0:
            failed_url.put(movie)
            return None
        for item in items:
            comment = item.find('p').get_text().strip()
            content = np.array([*movie, comment])
            table.put(content)
    except Exception:
        # Narrowed from a bare ``except`` so Ctrl-C / SystemExit still work.
        print('parser error')
def download(movie):
    """Fetch the comment page referenced by a movie row and parse it.

    *movie* is a row of (name, rating, subject_href, comment_nums,
    comment_href, label); index 4 is the comment page URL.
    """
    html = fetch(movie[4])
    parser(html, movie)
if __name__ == '__main__':
    # movies = pd.read_csv('reviews_url.csv')
    movies = pd.read_csv('failed136.csv')
    print('*' * 50)
    t1 = time.time()
    tasks = [movie for movie in movies.values]
    pool = ThreadPool(800)
    pool.map_async(download, tasks)
    pool.close()
    # FIX: the original called failed_url.join()/table.join(), but the
    # workers never call task_done(), so Queue.join() blocks forever once
    # anything has been put() -- it only "worked" by racing ahead of the
    # first put().  pool.join() alone correctly waits for every task, which
    # also makes the trailing time.sleep(10) unnecessary.
    pool.join()
    df = pd.DataFrame(list(table.queue), columns=['name', 'rating', 'subject_href', 'comment_nums', 'comment_href', 'label', 'content'])
    df_failed = pd.DataFrame(list(failed_url.queue), columns=['name', 'rating', 'subject_href', 'comment_nums', 'comment_href', 'label'])
    df_failed.to_csv('failed/failed{}.csv'.format(len(df_failed)), index=False)
    df.to_csv('result/{}.csv'.format(len(df)), index=False)
    t2 = time.time()
    print('time consumption:%s' % (t2 - t1))
    print('*' * 50)
| [
"pandas.read_csv",
"multiprocessing.dummy.Pool",
"time.sleep",
"time.time",
"numpy.array",
"requests.get",
"bs4.BeautifulSoup",
"queue.Queue"
] | [((174, 187), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (185, 187), False, 'import queue\n'), ((201, 214), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (212, 214), False, 'import queue\n'), ((1566, 1594), 'pandas.read_csv', 'pd.read_csv', (['"""failed136.csv"""'], {}), "('failed136.csv')\n", (1577, 1594), True, 'import pandas as pd\n'), ((1624, 1635), 'time.time', 'time.time', ([], {}), '()\n', (1633, 1635), False, 'import time\n'), ((1695, 1710), 'multiprocessing.dummy.Pool', 'ThreadPool', (['(800)'], {}), '(800)\n', (1705, 1710), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((1830, 1844), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1840, 1844), False, 'import time\n'), ((2269, 2280), 'time.time', 'time.time', ([], {}), '()\n', (2278, 2280), False, 'import time\n'), ((475, 516), 'requests.get', 'requests.get', (['PROXY_POOL_URL'], {'timeout': '(100)'}), '(PROXY_POOL_URL, timeout=100)\n', (487, 516), False, 'import requests\n'), ((681, 744), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'proxies': 'proxies', 'timeout': '(50)'}), '(url, headers=headers, proxies=proxies, timeout=50)\n', (693, 744), False, 'import requests\n'), ((1007, 1034), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (1020, 1034), False, 'from bs4 import BeautifulSoup\n'), ((1292, 1319), 'numpy.array', 'np.array', (['[*movie, comment]'], {}), '([*movie, comment])\n', (1300, 1319), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import time
class Identity:
    """No-op preprocessing stage: fit and transform leave the data untouched."""

    def __init__(self, eps=0.0001, name=""):
        # *eps* is accepted only for interface parity with the other
        # preprocessors; it is never used here.
        self.name = name + "preprocessing(identity)"

    def fit(self, x, *args, **kwargs):
        """Do nothing; return self so calls can be chained."""
        return self

    def fit_transform(self, x, *args, **kwargs):
        """Return *x* unchanged."""
        return x

    def transform(self, x, *args, **kwargs):
        """Return *x* unchanged."""
        return x
class Standardize:
    """Standardize data to zero mean and unit variance along the given axes."""

    def __init__(self, eps=0.000001, axis=[0], name=""):
        self.name = name + "preprocessing(standardize,eps=" + str(eps) + ")"
        self.eps = eps
        self.axis = axis

    def fit(self, x, **kwargs):
        """Estimate the per-feature mean and (eps-regularized) std from *x*."""
        print(self.name + " fitting...")
        start = time.time()
        axes = tuple(self.axis)
        self.mean = x.mean(axis=axes, keepdims=True)
        # eps keeps the later divisions finite for constant features.
        self.std = x.std(axis=axes, keepdims=True) + self.eps
        print(self.name + " done in {0:.2f} s.".format(time.time() - start))
        return self

    def transform(self, x, inplace=False, **kwargs):
        """Apply the fitted standardization.

        When *inplace* is True, *x* is mutated and None is returned;
        otherwise a new array is returned.
        """
        if not inplace:
            return (x - self.mean) / self.std
        x -= self.mean
        x /= self.std

    def fit_transform(self, x, inplace=False, **kwargs):
        """Fit on *x*, then transform it."""
        self.fit(x)
        return self.transform(x, inplace)
class ZCAWhitening:
    """ZCA whitening: decorrelate features while staying close to the input."""

    def __init__(self, eps=0.0001, name=""):
        self.name = name + "preprocessing(zcawhitening,eps=" + str(eps) + ")"
        self.eps = eps

    def fit(self, x):
        """Learn the mean and the spectral factors of the flattened data."""
        print(self.name + " fitting ...")
        start = time.time()
        flat = np.reshape(x, (x.shape[0], -1))
        self.mean = flat.mean(0, keepdims=True)
        self.S, self.U = _spectral_decomposition(flat - self.mean, self.eps)
        print(self.name + " done in {0:.2f} s.".format(time.time() - start))
        return self

    def transform(self, x):
        """Whiten *x* with the factors learned in fit(); shape is preserved."""
        flat = np.reshape(x, (x.shape[0], -1)) - self.mean
        return _zca_whitening(flat, self.U, self.S).reshape(x.shape)

    def fit_transform(self, x):
        """Fit on *x*, then transform it."""
        return self.fit(x).transform(x)
def _spectral_decomposition(flatx, eps):
U, S, V = np.linalg.svd(flatx, full_matrices=False)
S = np.diag(1.0 / np.sqrt(S + eps))
return S, V
def _zca_whitening(flatx, U, S):
M = np.dot(np.dot(U.T, S), U)
return np.dot(M, flatx.T).T
| [
"time.time",
"numpy.linalg.svd",
"numpy.reshape",
"numpy.dot",
"numpy.sqrt"
] | [((2051, 2092), 'numpy.linalg.svd', 'np.linalg.svd', (['flatx'], {'full_matrices': '(False)'}), '(flatx, full_matrices=False)\n', (2064, 2092), True, 'import numpy as np\n'), ((673, 684), 'time.time', 'time.time', ([], {}), '()\n', (682, 684), False, 'import time\n'), ((1469, 1480), 'time.time', 'time.time', ([], {}), '()\n', (1478, 1480), False, 'import time\n'), ((1497, 1528), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], -1)'], {}), '(x, (x.shape[0], -1))\n', (1507, 1528), True, 'import numpy as np\n'), ((2199, 2213), 'numpy.dot', 'np.dot', (['U.T', 'S'], {}), '(U.T, S)\n', (2205, 2213), True, 'import numpy as np\n'), ((2229, 2247), 'numpy.dot', 'np.dot', (['M', 'flatx.T'], {}), '(M, flatx.T)\n', (2235, 2247), True, 'import numpy as np\n'), ((1794, 1825), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], -1)'], {}), '(x, (x.shape[0], -1))\n', (1804, 1825), True, 'import numpy as np\n'), ((2115, 2131), 'numpy.sqrt', 'np.sqrt', (['(S + eps)'], {}), '(S + eps)\n', (2122, 2131), True, 'import numpy as np\n'), ((879, 890), 'time.time', 'time.time', ([], {}), '()\n', (888, 890), False, 'import time\n'), ((1711, 1722), 'time.time', 'time.time', ([], {}), '()\n', (1720, 1722), False, 'import time\n')] |
"""
Mask grid points based on different criteria.
"""
import numpy as np
from .base import n_1d_arrays
try:
from pykdtree.kdtree import KDTree
except ImportError:
from scipy.spatial import cKDTree as KDTree # pylint: disable=no-name-in-module
def distance_mask(
    data_coordinates, maxdist, coordinates=None, grid=None, projection=None
):
    """
    Mask grid points that lie too far from any data point.

    Distances are Euclidean norms. For geographic data, pass a projection
    function so coordinates are converted to Cartesian before the distance
    computation.

    Either *coordinates* or *grid* must be given:

    * With *coordinates*, returns a boolean array that is False wherever a
      point is more than *maxdist* from the closest data point.
    * With *grid*, builds the mask from the grid coordinates and applies it to
      *grid* (an :class:`xarray.Dataset`).

    .. note::

        If installed, package ``pykdtree`` will be used instead of
        :class:`scipy.spatial.cKDTree` for better performance.

    Parameters
    ----------
    data_coordinates : tuple of arrays
        Coordinates of the data points, ordered (easting, northing, ...).
        Only the first two coordinates are used.
    maxdist : float
        The maximum distance that a point can be from the closest data point.
    coordinates : None or tuple of arrays
        Coordinates of the points to mask, ordered (easting, northing, ...).
        Only the first two coordinates are used.
    grid : None or :class:`xarray.Dataset`
        2D grid with values to be masked. The first two dimensions of the grid
        are taken as northing and easting, respectively.
    projection : callable or None
        Optional ``projection(easting, northing) -> (proj_easting,
        proj_northing)`` applied to both coordinate sets before distances are
        calculated.

    Returns
    -------
    mask : array or :class:`xarray.Dataset`
        A boolean array with the same shape as the coordinate arrays, or
        *grid* with the mask applied through :meth:`xarray.Dataset.where`.

    Examples
    --------
    >>> from verde import grid_coordinates
    >>> region = (0, 5, -10, -4)
    >>> spacing = 1
    >>> coords = grid_coordinates(region, spacing=spacing)
    >>> mask = distance_mask((2.5, -7.5), maxdist=2, coordinates=coords)
    >>> print(mask)
    [[False False False False False False]
     [False False  True  True False False]
     [False  True  True  True  True False]
     [False  True  True  True  True False]
     [False False  True  True False False]
     [False False False False False False]
     [False False False False False False]]
    >>> # Mask an xarray.Dataset directly
    >>> import xarray as xr
    >>> coords_dict = {"easting": coords[0][0, :], "northing": coords[1][:, 0]}
    >>> data_vars = {"scalars": (["northing", "easting"], np.ones(mask.shape))}
    >>> grid = xr.Dataset(data_vars, coords=coords_dict)
    >>> masked = distance_mask((3.5, -7.5), maxdist=2, grid=grid)
    >>> print(masked.scalars.values)
    [[nan nan nan nan nan nan]
     [nan nan nan  1.  1. nan]
     [nan nan  1.  1.  1.  1.]
     [nan nan  1.  1.  1.  1.]
     [nan nan nan  1.  1. nan]
     [nan nan nan nan nan nan]
     [nan nan nan nan nan nan]]

    """
    if coordinates is None and grid is None:
        raise ValueError("Either coordinates or grid must be given.")
    if coordinates is None:
        # Derive 2D coordinate arrays from the dims of the grid's first
        # data variable (northing first, easting second).
        dims = [grid[var].dims for var in grid.data_vars][0]
        coordinates = np.meshgrid(grid.coords[dims[1]], grid.coords[dims[0]])
    unique_shapes = {coordinate.shape for coordinate in coordinates}
    if len(unique_shapes) != 1:
        raise ValueError("Coordinate arrays must have the same shape.")
    # Remember the grid shape before projection flattens the arrays.
    mask_shape = coordinates[0].shape
    if projection is not None:
        data_coordinates = projection(*n_1d_arrays(data_coordinates, 2))
        coordinates = projection(*n_1d_arrays(coordinates, 2))
    tree = KDTree(np.transpose(n_1d_arrays(data_coordinates, 2)))
    # query returns (distances, indices); only the distances are needed.
    distances, _ = tree.query(np.transpose(n_1d_arrays(coordinates, 2)))
    mask = distances.reshape(mask_shape) <= maxdist
    if grid is not None:
        return grid.where(mask)
    return mask
| [
"numpy.meshgrid"
] | [((3958, 4013), 'numpy.meshgrid', 'np.meshgrid', (['grid.coords[dims[1]]', 'grid.coords[dims[0]]'], {}), '(grid.coords[dims[1]], grid.coords[dims[0]])\n', (3969, 4013), True, 'import numpy as np\n')] |
from baumgarte2d import RigidBody, Simulator
from sympy import symbols, latex
import numpy as np
import matplotlib.pyplot as plt
from typing import List
from copy import deepcopy
import os
IMAGE_FILE_PATH: str = "./examples/temp"
def do_simulation(omega: float, save_image: bool):
    """Build a five-link mechanism, drive link 0 at angular velocity *omega*
    and simulate it for 10 seconds.

    Parameters
    ----------
    omega : float
        Constant angular velocity (rad/s) imposed on body 0's angle.
    save_image : bool
        If True, render every frame as a PNG under ``IMAGE_FILE_PATH``.

    Returns
    -------
    (ts, xs, forces)
        Time samples, simulated state trajectory and constraint forces as
        returned by ``Simulator.simulation``.
    """
    # Define the symbols and their numeric values.
    t = symbols("t")
    sym_ls = [symbols("l_%d" % i) for i in range(5)]  # half-lengths of the links
    val_ls = [0.1, 0.5, 0.5, 0.3, 0.3]
    sym_g, val_g = symbols("g"), 9.8
    sym_c, val_c = symbols("c"), 0.1  # joint damping coefficient
    sym_omega, val_omega = symbols("\\omega"), omega
    params = list(zip(sym_ls, val_ls))
    params += [(sym_c, val_c)]
    params += [(sym_g, val_g), (sym_omega, val_omega)]
    # Define the rigid bodies.
    blocks: List[RigidBody] = [RigidBody(i) for i in range(5)]
    # Build the simulation environment.
    simulator = Simulator()
    # Set the initial positions, in the order x0, y0, theta0.
    # Bodies 0-2 are stacked vertically; bodies 3-4 lean at -45 degrees.
    blocks[0].initial_position = np.array([0, val_ls[0], 0])
    blocks[1].initial_position = np.array([
        0,
        blocks[0].initial_position[1] + val_ls[0] + val_ls[1],
        0
    ])
    blocks[2].initial_position = np.array([
        0,
        blocks[1].initial_position[1] + val_ls[1] + val_ls[2],
        0
    ])
    blocks[3].initial_position = np.array([
        val_ls[3]*np.cos(np.pi/4),
        blocks[2].initial_position[1] + val_ls[2] + val_ls[3]*np.sin(np.pi/4),
        -np.pi/4
    ])
    blocks[4].initial_position = np.array([
        blocks[3].initial_position[0]
        + (val_ls[3] + val_ls[4])*np.cos(np.pi/4),
        blocks[3].initial_position[1]
        + (val_ls[3] + val_ls[4])*np.sin(np.pi/4),
        -np.pi/4
    ])
    for i in range(5):
        # Apply gravity.
        blocks[i].add_force_y(0-blocks[i].m*sym_g)
        # Register the body with the simulator.
        simulator.add_rigidbody(blocks[i])
        # Drawing settings (each block is drawn as a thin bar of length 2*l).
        blocks[i].height = val_ls[i]*2
        blocks[i].width = 0.02
        blocks[i].color = ["red", "green",
                           "blue", "pink", "orange"][i]
        blocks[i].mass = 0.125 * val_ls[i]*2
        blocks[i].moment_of_inertia = (4/3)*blocks[i].mass*val_ls[i]**2
    # Add the constraints.
    # Connect all consecutive bodies with damped pin joints.
    for i in range(4):
        simulator.add_pinjoint_constrain(
            (0, +sym_ls[i+0]), blocks[i+0],
            (0, -sym_ls[i+1]), blocks[i+1],
            dumper=sym_c
        )
    # Pin body 0 to the origin.
    simulator.add_pinjoint_constrain(
        (0, -sym_ls[0]), blocks[0],
        (0, 0), None
    )
    # Constrain body 2 to translate along the vertical axis only.
    simulator.add_slide_constrain(
        (0, 1), (0, 0), blocks[2],
        (0, 1), (0, 0), None
    )
    # Kinematically drive body 0's angle: theta0 = omega * t.
    simulator.add_constrain(blocks[0].theta-sym_omega*t)
    # Choose the time step (20 samples per drive revolution) and simulate.
    dt = (2*np.pi/val_omega)/20.0
    tmax = 10
    num = int(tmax/dt)
    ts = np.linspace(0, tmax, num=num)
    xs, forces = simulator.simulation(ts, parameters=params, return_force=True)
    if save_image:
        if not os.path.exists(IMAGE_FILE_PATH):
            os.mkdir(IMAGE_FILE_PATH)
        simulator.draw_all(ts, xs, 1,
                           xlim=(-10/3, +10/3),
                           ylim=(-1, +4),
                           save_format=IMAGE_FILE_PATH+"/%04d.png")
    return ts, xs, forces
def main():
    """Sweep several drive speeds, then plot link angles and drive torque."""
    # Drive angular velocities (rad/s) to compare.
    omegas: List[float] = [10.0, 20.0, 40.0, 80.0]
    # Run one simulation per omega (image rendering disabled) and keep
    # (omega, ts, xs, forces) records together.
    runs = [(omega, *do_simulation(omega, False)) for omega in omegas]
    # Plot the angles of the top two links of the double pendulum.
    plt.figure()
    upper_axes = plt.subplot(2, 1, 1)
    lower_axes = plt.subplot(2, 1, 2)
    for omega, ts, xs, _forces in runs:
        label = "$\\omega=%5.1lf$ rad/s" % omega
        upper_axes.plot(ts, np.degrees(xs[:, 3*3+2]), label=label)
        lower_axes.plot(ts, np.degrees(xs[:, 3*4+2]), label=label)
    upper_axes.legend()
    lower_axes.legend()
    upper_axes.set_title("body 3")
    lower_axes.set_title("body 4")
    plt.ylabel("Angle [Degree]")
    plt.xlabel("Time [s]")
    plt.show()
    # Plot the constraint torque that drives body 0.
    plt.figure()
    for omega, ts, _xs, forces in runs:
        force_array = np.array(forces)
        plt.plot(ts, force_array[:, 0*3+2], label="$\\omega=%5.1lf$ rad/s" % omega)
    plt.legend()
    plt.xlabel("Time [s]")
    plt.ylabel("Torque[Nm]")
    plt.show()
# Run the full parameter sweep when executed as a script.
if __name__ == "__main__":
    main()
| [
"sympy.symbols",
"matplotlib.pyplot.subplot",
"baumgarte2d.RigidBody",
"matplotlib.pyplot.show",
"os.mkdir",
"baumgarte2d.Simulator",
"matplotlib.pyplot.plot",
"numpy.degrees",
"matplotlib.pyplot.legend",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.lins... | [((306, 318), 'sympy.symbols', 'symbols', (['"""t"""'], {}), "('t')\n", (313, 318), False, 'from sympy import symbols, latex\n'), ((778, 789), 'baumgarte2d.Simulator', 'Simulator', ([], {}), '()\n', (787, 789), False, 'from baumgarte2d import RigidBody, Simulator\n'), ((854, 881), 'numpy.array', 'np.array', (['[0, val_ls[0], 0]'], {}), '([0, val_ls[0], 0])\n', (862, 881), True, 'import numpy as np\n'), ((915, 986), 'numpy.array', 'np.array', (['[0, blocks[0].initial_position[1] + val_ls[0] + val_ls[1], 0]'], {}), '([0, blocks[0].initial_position[1] + val_ls[0] + val_ls[1], 0])\n', (923, 986), True, 'import numpy as np\n'), ((1050, 1121), 'numpy.array', 'np.array', (['[0, blocks[1].initial_position[1] + val_ls[1] + val_ls[2], 0]'], {}), '([0, blocks[1].initial_position[1] + val_ls[1] + val_ls[2], 0])\n', (1058, 1121), True, 'import numpy as np\n'), ((2688, 2717), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax'], {'num': 'num'}), '(0, tmax, num=num)\n', (2699, 2717), True, 'import numpy as np\n'), ((3499, 3511), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3509, 3511), True, 'import matplotlib.pyplot as plt\n'), ((3523, 3543), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3534, 3543), True, 'import matplotlib.pyplot as plt\n'), ((3555, 3575), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3566, 3575), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3974), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Angle [Degree]"""'], {}), "('Angle [Degree]')\n", (3956, 3974), True, 'import matplotlib.pyplot as plt\n'), ((3979, 4001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (3989, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4014, 4016), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4056), 'matplotlib.pyplot.figure', 'plt.figure', 
([], {}), '()\n', (4054, 4056), True, 'import matplotlib.pyplot as plt\n'), ((4241, 4253), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4251, 4253), True, 'import matplotlib.pyplot as plt\n'), ((4258, 4280), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (4268, 4280), True, 'import matplotlib.pyplot as plt\n'), ((4285, 4309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Torque[Nm]"""'], {}), "('Torque[Nm]')\n", (4295, 4309), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4324), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4322, 4324), True, 'import matplotlib.pyplot as plt\n'), ((333, 352), 'sympy.symbols', 'symbols', (["('l_%d' % i)"], {}), "('l_%d' % i)\n", (340, 352), False, 'from sympy import symbols, latex\n'), ((430, 442), 'sympy.symbols', 'symbols', (['"""g"""'], {}), "('g')\n", (437, 442), False, 'from sympy import symbols, latex\n'), ((467, 479), 'sympy.symbols', 'symbols', (['"""c"""'], {}), "('c')\n", (474, 479), False, 'from sympy import symbols, latex\n'), ((512, 530), 'sympy.symbols', 'symbols', (['"""\\\\omega"""'], {}), "('\\\\omega')\n", (519, 530), False, 'from sympy import symbols, latex\n'), ((707, 719), 'baumgarte2d.RigidBody', 'RigidBody', (['i'], {}), '(i)\n', (716, 719), False, 'from baumgarte2d import RigidBody, Simulator\n'), ((4141, 4157), 'numpy.array', 'np.array', (['forces'], {}), '(forces)\n', (4149, 4157), True, 'import numpy as np\n'), ((4166, 4240), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'forces[:, 0 * 3 + 2]'], {'label': "('$\\\\omega=%5.1lf$ rad/s' % omega)"}), "(ts, forces[:, 0 * 3 + 2], label='$\\\\omega=%5.1lf$ rad/s' % omega)\n", (4174, 4240), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2864), 'os.path.exists', 'os.path.exists', (['IMAGE_FILE_PATH'], {}), '(IMAGE_FILE_PATH)\n', (2847, 2864), False, 'import os\n'), ((2878, 2903), 'os.mkdir', 'os.mkdir', (['IMAGE_FILE_PATH'], {}), '(IMAGE_FILE_PATH)\n', (2886, 2903), False, 'import os\n'), 
((3658, 3686), 'numpy.degrees', 'np.degrees', (['xs[:, 3 * 3 + 2]'], {}), '(xs[:, 3 * 3 + 2])\n', (3668, 3686), True, 'import numpy as np\n'), ((3764, 3792), 'numpy.degrees', 'np.degrees', (['xs[:, 3 * 4 + 2]'], {}), '(xs[:, 3 * 4 + 2])\n', (3774, 3792), True, 'import numpy as np\n'), ((1214, 1231), 'numpy.cos', 'np.cos', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (1220, 1231), True, 'import numpy as np\n'), ((1293, 1310), 'numpy.sin', 'np.sin', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (1299, 1310), True, 'import numpy as np\n'), ((1450, 1467), 'numpy.cos', 'np.cos', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (1456, 1467), True, 'import numpy as np\n'), ((1539, 1556), 'numpy.sin', 'np.sin', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (1545, 1556), True, 'import numpy as np\n')] |
import numpy as np
from algos.agent import randombot
from algos.encoders.trojangoPlane import TrojanGoPlane
from algos import gohelper
from algos import godomain
from algos.utils import display_board, alphaNumnericMove_from_point
import time
import math
import h5py
import multiprocessing
from multiprocessing import pool
import threading
import os
import concurrent.futures
from multiprocessing import set_start_method
from multiprocessing import get_context
#set_start_method("spawn")
class ExperienceBuffer:
    """Container for self-play training examples with HDF5 helpers.

    Holds three parallel collections: encoded board tensors, one-hot move
    targets and per-position outcome values.
    """

    def __init__(self, model_input, action_target, value_target):
        self.model_input = model_input      # encoded board tensors
        self.action_target = action_target  # one-hot move distributions
        self.value_target = value_target    # outcome value per position

    def serialize(self, h5file):
        """Write the three collections under an 'experience' group."""
        group = h5file.create_group('experience')
        group.create_dataset('model_input', data=self.model_input)
        group.create_dataset('action_target', data=self.action_target)
        group.create_dataset('value_target', data=self.value_target)

    def load_experience(self, h5file):
        """Read an 'experience' group back into a new ExperienceBuffer."""
        group = h5file['experience']
        return ExperienceBuffer(
            model_input=np.array(group['model_input']),
            action_target=np.array(group['action_target']),
            value_target=np.array(group['value_target']),
        )

    def display_experience_buffer(self):
        """Print the stored model inputs for quick inspection."""
        print("Model Input : ")
        print(self.model_input)
def save_examples(encoder, exp_buff, game, bot_move, val):
    """Encode the current position and append one training example to *exp_buff*.

    Parameters
    ----------
    encoder : object with an ``encode(game)`` method
        Board encoder producing the network input tensor.
    exp_buff : ExperienceBuffer
        Buffer whose ``model_input``, ``action_target`` and ``value_target``
        lists are appended to.
    game : game state with a ``board.board_width`` attribute
        Position *after* ``bot_move`` was applied.
    bot_move : move with ``is_pass`` and (when not a pass) ``point.row``/``point.col``
        The move that was just played.
    val : number
        Value target stored for this position.
    """
    board_tensor = encoder.encode(game)
    exp_buff.model_input.append(board_tensor)
    # One-hot over every board point plus a trailing "pass" slot.
    # Generalized from the previous hard-coded length 26 (5x5 board only).
    # NOTE(review): assumes a square board (width == height) — confirm.
    width = game.board.board_width
    n_actions = width * width + 1
    search_prob = np.zeros(n_actions)
    if bot_move.is_pass:
        index = n_actions - 1  # last slot encodes "pass"
    else:
        index = int(width * bot_move.point.row + bot_move.point.col)
    search_prob[index] = 1
    exp_buff.action_target.append(search_prob)
    exp_buff.value_target.append(val)
    return None
def func_main(dummy):
    """Play one random-vs-random game on a 5x5 board.

    Collects one training example per move and returns them as an
    :class:`ExperienceBuffer`. The *dummy* argument is unused; it exists so
    the function can be submitted uniformly to an executor.
    """
    board_size = 5
    num_planes = 7
    encoder = TrojanGoPlane((board_size, board_size), num_planes)
    state = godomain.GameState.new_game(board_size)
    # Both sides are played by the random bot.
    players = {
        gohelper.Player.black: randombot.RandomBot(),
        gohelper.Player.white: randombot.RandomBot(),
    }
    buffer = ExperienceBuffer([], [], [])
    while not state.is_over():
        chosen_move = players[state.next_player].select_move(state)
        state = state.apply_move(chosen_move)
        # Record the encoded position, the chosen move and a placeholder
        # value of 1 for every position.
        save_examples(encoder, buffer, state, chosen_move, 1)
    return buffer
# Script entry point: fan self-play games out over a thread pool, collect
# the resulting ExperienceBuffers, flatten them and serialize to HDF5.
if __name__ == '__main__':
    # Per-game accumulators; each completed game appends its lists here.
    model_input = []
    #model_input = np.array(model_input)
    action_target = []
    #action_target = np.array(action_target)
    value_target = []
    #value_target = np.array(value_target)
    #with h5py.File('experience_1.hdf5', 'w') as exp_outf:
    # Initial placeholder buffer; see NOTE below about its reuse on failures.
    exp_buff = ExperienceBuffer(model_input, action_target, value_target)
    total_games = 1000
    start = time.time()
    win_rec = [0, 0 ,0] # draw, Black wins, White wins
    CONNECTIONS = 100 #100
    TIMEOUT = 5
    out = []
    dummy = 10
    # NOTE(review): under the CPython GIL, threads only overlap I/O; this
    # workload is CPU-bound, so a process pool (the commented-out alternative
    # below) would presumably parallelise better — confirm.
    with concurrent.futures.ThreadPoolExecutor(max_workers=CONNECTIONS) as executor:
    #with concurrent.futures.ProcessPoolExecutor() as executor:
        results = (executor.submit(func_main, dummy) for _ in range(total_games))
        for result in concurrent.futures.as_completed(results):
            try:
                #print("Got the exp_buff")
                exp_buff = result.result() # return ExperienceBuffer object
                #print(exp_buff.value_target)
            except Exception as exc:
                print("Invalid result")
                #exp_buffer = str(type(exc))
            finally:
                #out = out.append(exp_buff)
                #print("Append to all 3 lists ...")
                # NOTE(review): if result.result() raised, exp_buff still
                # holds the previous (or initial) buffer, so that game's data
                # is appended again — likely unintended duplication.
                model_input.append(exp_buff.model_input)
                action_target.append(exp_buff.action_target)
                value_target.append(exp_buff.value_target)
    """
    # Play total_games games
    for i in range(total_games):
        main(win_rec, exp_buff)
    """
    """
    for exp_buff in out:
        model_input.append(exp_buff.model_input)
        action_target.append(exp_buff.action_target)
        value_target.append(exp_buff.value_target)
    """
    # Flatten the per-game lists into one flat list of examples each.
    value_target_flat_list = [item for sublist in value_target for item in sublist]
    #value_target_flat_list = lambda value_target: [item for sublist in value_target for item in sublist]
    #print("value_target_flat_list :", len(value_target_flat_list))
    #print(value_target_flat_list)
    action_target_flat_list = [item for sublist in action_target for item in sublist]
    #print("action_target_flat_list :", len(action_target_flat_list))
    #print(action_target_flat_list)
    model_input_flat_list = [item for sublist in model_input for item in sublist]
    #print("model_input_flat_list :", len(model_input_flat_list))
    #print(model_input_flat_list)
    # Convert list to a np.array and save to file.
    model_input = np.array(model_input_flat_list)
    action_target = np.array(action_target_flat_list)
    value_target = np.array(value_target_flat_list)
    #print("shape : ", model_input.shape, action_target.shape, value_target.shape)
    #print("len action val : \n", len(action_target), len(value_target))
    # Persist the aggregated experience to disk.
    with h5py.File('experience_P10.hdf5', 'w') as exp_out:
        ExperienceBuffer(model_input, action_target, value_target).serialize(exp_out)
    """
    with h5py.File('experience_3.hdf5', 'r') as exp_input:
        experience_buffer = ExperienceBuffer(model_input, action_target, value_target).load_experience(exp_input)
        print("Input Model ...")
        print(experience_buffer.model_input.shape)
        print("Action Target ...")
        print(experience_buffer.action_target.shape)
    """
    finish = time.time()
    print("Time taken to play {} games is {} secs".format(total_games, math.floor(finish - start)))
    #print("Draws: {} Black wins: {} White wins: {} ".format(win_rec[0],win_rec[1], win_rec[2]))
| [
"h5py.File",
"algos.godomain.GameState.new_game",
"numpy.zeros",
"math.floor",
"time.time",
"numpy.array",
"algos.encoders.trojangoPlane.TrojanGoPlane",
"algos.agent.randombot.RandomBot"
] | [((1767, 1779), 'numpy.zeros', 'np.zeros', (['(26)'], {}), '(26)\n', (1775, 1779), True, 'import numpy as np\n'), ((2348, 2399), 'algos.encoders.trojangoPlane.TrojanGoPlane', 'TrojanGoPlane', (['(board_size, board_size)', 'num_planes'], {}), '((board_size, board_size), num_planes)\n', (2361, 2399), False, 'from algos.encoders.trojangoPlane import TrojanGoPlane\n'), ((2416, 2455), 'algos.godomain.GameState.new_game', 'godomain.GameState.new_game', (['board_size'], {}), '(board_size)\n', (2443, 2455), False, 'from algos import godomain\n'), ((2609, 2620), 'time.time', 'time.time', ([], {}), '()\n', (2618, 2620), False, 'import time\n'), ((3416, 3427), 'time.time', 'time.time', ([], {}), '()\n', (3425, 3427), False, 'import time\n'), ((4126, 4137), 'time.time', 'time.time', ([], {}), '()\n', (4135, 4137), False, 'import time\n'), ((6209, 6240), 'numpy.array', 'np.array', (['model_input_flat_list'], {}), '(model_input_flat_list)\n', (6217, 6240), True, 'import numpy as np\n'), ((6261, 6294), 'numpy.array', 'np.array', (['action_target_flat_list'], {}), '(action_target_flat_list)\n', (6269, 6294), True, 'import numpy as np\n'), ((6314, 6346), 'numpy.array', 'np.array', (['value_target_flat_list'], {}), '(value_target_flat_list)\n', (6322, 6346), True, 'import numpy as np\n'), ((7020, 7031), 'time.time', 'time.time', ([], {}), '()\n', (7029, 7031), False, 'import time\n'), ((2500, 2521), 'algos.agent.randombot.RandomBot', 'randombot.RandomBot', ([], {}), '()\n', (2519, 2521), False, 'from algos.agent import randombot\n'), ((2554, 2575), 'algos.agent.randombot.RandomBot', 'randombot.RandomBot', ([], {}), '()\n', (2573, 2575), False, 'from algos.agent import randombot\n'), ((6514, 6551), 'h5py.File', 'h5py.File', (['"""experience_P10.hdf5"""', '"""w"""'], {}), "('experience_P10.hdf5', 'w')\n", (6523, 6551), False, 'import h5py\n'), ((7103, 7129), 'math.floor', 'math.floor', (['(finish - start)'], {}), '(finish - start)\n', (7113, 7129), False, 'import math\n'), ((1129, 
1174), 'numpy.array', 'np.array', (["h5file['experience']['model_input']"], {}), "(h5file['experience']['model_input'])\n", (1137, 1174), True, 'import numpy as np\n'), ((1221, 1268), 'numpy.array', 'np.array', (["h5file['experience']['action_target']"], {}), "(h5file['experience']['action_target'])\n", (1229, 1268), True, 'import numpy as np\n'), ((1314, 1360), 'numpy.array', 'np.array', (["h5file['experience']['value_target']"], {}), "(h5file['experience']['value_target'])\n", (1322, 1360), True, 'import numpy as np\n')] |
import logging
from typing import List, Set
import numpy as np
from tqdm import tqdm
from src.common.audioviz_dataset import AudiovizDataset
from src.common.audioviz_datastore import AbstractAudiovizDataStore
from src.common.fun_call import FunCall
class FuncallStore:
    """
    Represents a collection of features calculated for a dataset.

    Each registered :class:`FunCall` has its result computed once (lazily, on
    ``add``) and persisted in the backing datastore under the funcall's repr.

    Bug fix: ``_process`` used to append the funcall to the internal list in
    addition to the append done by ``add``, so every freshly computed funcall
    was registered twice; the duplicate append has been removed.
    """

    def __init__(self, dataset: "AudiovizDataset", store: "AbstractAudiovizDataStore"):
        self._dataset = dataset
        self._store = store
        # Funcalls registered in this collection; their results live in the store.
        self.__funcalls: List["FunCall"] = []

    def update(self, funcalls: "List[FunCall]"):
        """Register every funcall, computing results missing from the store."""
        with self._store:
            for fa in funcalls:
                self.add(fa)

    @property
    def funcall_ids(self):
        """String identifiers (reprs) of the registered funcalls."""
        return [f.__repr__() for f in self.__funcalls]

    @property
    def funcalls(self):
        """A shallow copy of the registered funcalls."""
        return self.__funcalls[:]

    def __str__(self):
        return "".join([str(f) for f in self.__funcalls])

    def __repr__(self):
        return "".join([f.__repr__() for f in self.__funcalls])

    def __len__(self):
        return len(self.__funcalls)

    def __getitem__(self, key: "FunCall"):
        """Return the stored result for *key*; raise ``KeyError`` if unregistered."""
        if key in self:
            return self._store[key.__repr__()]
        else:
            raise KeyError(key)

    def __contains__(self, item: "FunCall"):
        return item.__repr__() in self.funcall_ids

    def __in_store(self, item: "FunCall"):
        # Membership in the persistent store, independent of the in-memory list.
        return item.__repr__() in self._store

    def add(self, funcall: "FunCall"):
        """Register *funcall*, computing and persisting its result if needed.

        Opens the store temporarily when it is not already open. A funcall
        already registered here is skipped; one already present in the store
        is registered without recomputation.
        """
        # TODO refactor to decorator
        was_open = bool(self._store)
        if not was_open:
            self._store.open()
        try:
            if funcall in self:
                logging.info(
                    f"Funcall {funcall} is already in this {self.__class__.__name__}. Skipping ..."
                )
            else:
                if not self.__in_store(funcall):
                    self._process(funcall)
                self.__funcalls.append(funcall)
        finally:
            if not was_open:
                self._store.close()

    def get_as_matrix(self, rows=None):
        """Concatenate all stored results (optionally row-sliced) along axis 1."""
        # TODO implement iterator for FeatureCollection for convenience this then becomes "for fe in self"
        if not self.funcall_ids:
            # NOTE(review): np.ndarray([]) is an uninitialized 0-d array; an
            # explicit empty array may be the real intent — confirm callers.
            return np.ndarray([])
        sl = rows if rows is not None else slice(None, None)
        return np.concatenate([self[fe][sl] for fe in self.__funcalls], axis=1)

    def _process(self, funcall: "FunCall", chunksize=1000):
        """Evaluate *funcall* over the whole dataset in chunks and persist it.

        Creates one dataset in the store keyed by the funcall's repr and
        records the elapsed wall time in its attrs. Registration in
        ``self.__funcalls`` is left to ``add`` (doing it here as well caused
        duplicate entries).
        """
        logging.info(f"Calculating funcall {funcall.name} with chunksize {chunksize}")
        with self._store, self._dataset._store:
            len_samples = self._dataset.shape[0]
            pbar = tqdm(total=len_samples)
            pbar.set_description(funcall.name)
            ds = None
            for i, chunk in enumerate(
                self._dataset.map_chunked(funcall, chunksize), start=1,
            ):
                # We do this inside the loop because we need chunk[0].shape
                if ds is None:
                    logging.info(f"Starting to calculate feature {funcall.name}")
                    ds = self._store._file.create_dataset(
                        funcall.__repr__(), (len_samples, *chunk[0].shape),
                    )
                chunk_start = i * chunksize - chunksize
                chunk_end = i * chunksize
                ds[chunk_start:chunk_end] = chunk
                logging.debug(f"Processed chunk [{chunk_start}:{chunk_end}]")
                pbar.update(chunksize)
            elapsed = pbar.format_dict["elapsed"]
            pbar.close()
            # NOTE(review): if the dataset is empty, ds stays None and the
            # next line raises — presumably len_samples > 0 always; confirm.
            ds.attrs["time"] = elapsed
| [
"tqdm.tqdm",
"logging.debug",
"logging.info",
"numpy.ndarray",
"numpy.concatenate"
] | [((2313, 2377), 'numpy.concatenate', 'np.concatenate', (['[self[fe][sl] for fe in self.__funcalls]'], {'axis': '(1)'}), '([self[fe][sl] for fe in self.__funcalls], axis=1)\n', (2327, 2377), True, 'import numpy as np\n'), ((2445, 2523), 'logging.info', 'logging.info', (['f"""Calculating funcall {funcall.name} with chunksize {chunksize}"""'], {}), "(f'Calculating funcall {funcall.name} with chunksize {chunksize}')\n", (2457, 2523), False, 'import logging\n'), ((2222, 2236), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (2232, 2236), True, 'import numpy as np\n'), ((2640, 2663), 'tqdm.tqdm', 'tqdm', ([], {'total': 'len_samples'}), '(total=len_samples)\n', (2644, 2663), False, 'from tqdm import tqdm\n'), ((1650, 1753), 'logging.info', 'logging.info', (['f"""Funcall {funcall} is already in this {self.__class__.__name__}. Skipping ..."""'], {}), "(\n f'Funcall {funcall} is already in this {self.__class__.__name__}. Skipping ...'\n )\n", (1662, 1753), False, 'import logging\n'), ((3439, 3457), 'logging.debug', 'logging.debug', (['msg'], {}), '(msg)\n', (3452, 3457), False, 'import logging\n'), ((2986, 3047), 'logging.info', 'logging.info', (['f"""Starting to calculate feature {funcall.name}"""'], {}), "(f'Starting to calculate feature {funcall.name}')\n", (2998, 3047), False, 'import logging\n')] |
import itertools
import warnings
import numpy as np
from .base import imgfeature, ndfeature
@ndfeature
def gradient(pixels):
    r"""
    Calculates the gradient of an input image, channel by channel.

    The image is assumed to carry its channels on the first axis. Gradients
    are computed with :func:`numpy.gradient`: second order accurate central
    differences in the interior and first order accurate one-sided
    differences at the boundaries.

    Parameters
    ----------
    pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
        Either the image object itself or an array where the first dimension
        is interpreted as channels. An N-dimensional image is therefore an
        N+1 dimensional array. 2D images must have a float/double dtype
        (int is not supported).

    Returns
    -------
    gradient : `ndarray`
        The gradient over each axis over each channel, ordered axis-major:
        all channels' first-axis gradients, then all channels' second-axis
        gradients, and so on. E.g. for a 2D, 3-channel image the first axis
        has length `6` with ordering
        ``I[:, 0, 0] = [R0_y, G0_y, B0_y, R0_x, G0_x, B0_x]``.
    """
    if pixels.dtype == np.uint8:
        raise TypeError("Attempting to take the gradient on a uint8 image.")
    n_dims = pixels.ndim - 1
    # One list of n_dims gradient arrays per channel.
    per_channel = [np.gradient(chan, edge_order=1) for chan in pixels]
    # Reorder axis-major (all channels for axis 0, then axis 1, ...) and add
    # a leading singleton channel axis to each array for concatenation.
    ordered = [
        per_channel[chan][axis][None, ...]
        for axis in range(n_dims)
        for chan in range(len(per_channel))
    ]
    return np.concatenate(ordered, axis=0)
@ndfeature
def gaussian_filter(pixels, sigma):
    r"""
    Convolves every channel of the input image with a multidimensional
    Gaussian filter.

    Parameters
    ----------
    pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
        Either the image object itself or an array with the pixels, channels
        on the first axis (an N-dimensional image is an N+1 dimensional
        array).
    sigma : `float` or `list` of `float`
        Standard deviation(s) of the Gaussian kernel — one value per spatial
        axis, or a single `float` shared by all axes.

    Returns
    -------
    output_image : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
        The filtered image, with the same type and size as the input
        ``pixels``.
    """
    from scipy.ndimage import gaussian_filter as scipy_gaussian_filter  # expensive

    smoothed = np.empty(pixels.shape, dtype=pixels.dtype)
    for channel_index in range(pixels.shape[0]):
        # Filter each channel in place into the pre-allocated output.
        scipy_gaussian_filter(
            pixels[channel_index], sigma, output=smoothed[channel_index]
        )
    return smoothed
@ndfeature
def igo(pixels, double_angles=False, verbose=False):
    r"""
    Extracts Image Gradient Orientation (IGO) features from the input image.
    The output image has ``N * C`` number of channels, where ``N`` is the
    number of channels of the original image and ``C = 2`` or ``C = 4``
    depending on whether double angles are used.

    Parameters
    ----------
    pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
        Either the image object itself or an array with the pixels. The first
        dimension is interpreted as channels. This means an N-dimensional image
        is represented by an N+1 dimensional array.
    double_angles : `bool`, optional
        Assume that ``phi`` represents the gradient orientations.

        If this flag is ``False``, the features image is the concatenation of
        ``cos(phi)`` and ``sin(phi)``, thus 2 channels.

        If ``True``, the features image is the concatenation of
        ``cos(phi)``, ``sin(phi)``, ``cos(2 * phi)``, ``sin(2 * phi)``, thus 4
        channels.
    verbose : `bool`, optional
        Flag to print IGO related information.

    Returns
    -------
    igo : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
        The IGO features image. It has the same type and shape as the input
        ``pixels``. The output number of channels depends on the
        ``double_angles`` flag.

    Raises
    ------
    ValueError
        Image has to be 2D in order to extract IGOs.

    References
    ----------
    .. [1] <NAME>, <NAME> and <NAME>, "Subspace learning
        from image gradient orientations", IEEE Transactions on Pattern Analysis
        and Machine Intelligence, vol. 34, num. 12, p. 2454--2466, 2012.
    """
    # check number of dimensions
    if len(pixels.shape) != 3:
        raise ValueError(
            "IGOs only work on 2D images. Expects image data "
            "to be 3D, channels + shape."
        )
    n_img_chnls = pixels.shape[0]
    # feature channels per image channel: 2, or 4 with double angles
    feat_chnls = 4 if double_angles else 2
    # compute gradients
    grad = gradient(pixels)
    # compute angles: gradient() returns y-gradients first, then x-gradients
    grad_orient = np.angle(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
    # compute igo image
    igo_pixels = np.empty(
        (n_img_chnls * feat_chnls, pixels.shape[1], pixels.shape[2]), dtype=pixels.dtype
    )
    if double_angles:
        dbl_grad_orient = 2 * grad_orient
        # y angles
        igo_pixels[:n_img_chnls] = np.sin(grad_orient)
        igo_pixels[n_img_chnls : n_img_chnls * 2] = np.sin(dbl_grad_orient)
        # x angles
        igo_pixels[n_img_chnls * 2 : n_img_chnls * 3] = np.cos(grad_orient)
        igo_pixels[n_img_chnls * 3 :] = np.cos(dbl_grad_orient)
    else:
        igo_pixels[:n_img_chnls] = np.sin(grad_orient)  # y
        igo_pixels[n_img_chnls:] = np.cos(grad_orient)  # x
    # print information
    if verbose:
        info_str = "IGO Features:\n"
        info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
            info_str, pixels.shape[2], pixels.shape[1], n_img_chnls
        )
        info_str = "{} - Double angles are {}.\n".format(
            info_str, "enabled" if double_angles else "disabled"
        )
        # Bug fix: report the actual output channel count
        # (n_img_chnls * feat_chnls), not the input channel count.
        info_str = "{}Output image size {}W x {}H with {} channels.".format(
            info_str, igo_pixels.shape[2], igo_pixels.shape[1], igo_pixels.shape[0]
        )
        print(info_str)
    return igo_pixels
@ndfeature
def es(pixels, verbose=False):
    r"""
    Extracts Edge Structure (ES) features from the input image. The output image
    has ``N * C`` number of channels, where ``N`` is the number of channels of
    the original image and ``C = 2``.

    Parameters
    ----------
    pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
        Either an image object itself or an array where the first axis
        represents the number of channels. This means an N-dimensional image
        is represented by an N+1 dimensional array.
    verbose : `bool`, optional
        Flag to print ES related information.

    Returns
    -------
    es : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
        The ES features image. It has the same type and shape as the input
        ``pixels``. The output number of channels is ``C = 2``.

    Raises
    ------
    ValueError
        Image has to be 2D in order to extract ES features.

    References
    ----------
    .. [1] <NAME>, <NAME>, "On representing edge structure for model
        matching", Proceedings of the IEEE Conference on Computer Vision and
        Pattern Recognition (CVPR), 2001.
    """
    # check number of dimensions
    if len(pixels.shape) != 3:
        raise ValueError(
            "ES features only work on 2D images. Expects "
            "image data to be 3D, channels + shape."
        )
    n_img_chnls = pixels.shape[0]
    # feature channels per image channel
    feat_channels = 2
    # compute gradients
    grad = gradient(pixels)
    # compute magnitude
    grad_abs = np.abs(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
    # offset the magnitudes by their median before normalising, as in the
    # ES formulation (also keeps the denominator away from zero)
    grad_abs = grad_abs + np.median(grad_abs)
    es_pixels = np.empty(
        (pixels.shape[0] * feat_channels, pixels.shape[1], pixels.shape[2]),
        dtype=pixels.dtype,
    )
    es_pixels[:n_img_chnls] = grad[:n_img_chnls] / grad_abs
    es_pixels[n_img_chnls:] = grad[n_img_chnls:] / grad_abs
    # print information
    if verbose:
        info_str = "ES Features:\n"
        info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
            info_str, pixels.shape[2], pixels.shape[1], n_img_chnls
        )
        # Bug fix: report the actual output channel count (2 per input
        # channel), not the input channel count.
        info_str = "{}Output image size {}W x {}H with {} channels.".format(
            info_str, es_pixels.shape[2], es_pixels.shape[1], es_pixels.shape[0]
        )
        print(info_str)
    return es_pixels
@ndfeature
def daisy(
pixels,
step=1,
radius=15,
rings=2,
histograms=2,
orientations=8,
normalization="l1",
sigmas=None,
ring_radii=None,
verbose=False,
):
r"""
Extracts Daisy features from the input image. The output image has ``N * C``
number of channels, where ``N`` is the number of channels of the original
image and ``C`` is the feature channels determined by the input options.
Specifically, ``C = (rings * histograms + 1) * orientations``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
step : `int`, optional
The sampling step that defines the density of the output image.
radius : `int`, optional
The radius (in pixels) of the outermost ring.
rings : `int`, optional
The number of rings to be used.
histograms : `int`, optional
The number of histograms sampled per ring.
orientations : `int`, optional
The number of orientations (bins) per histogram.
normalization : [ 'l1', 'l2', 'daisy', None ], optional
It defines how to normalize the descriptors
If 'l1' then L1-normalization is applied at each descriptor.
If 'l2' then L2-normalization is applied at each descriptor.
If 'daisy' then L2-normalization is applied at individual histograms.
If None then no normalization is employed.
sigmas : `list` of `float` or ``None``, optional
Standard deviation of spatial Gaussian smoothing for the centre
histogram and for each ring of histograms. The `list` of sigmas should
be sorted from the centre and out. I.e. the first sigma value defines
the spatial smoothing of the centre histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the `rings` parameter by setting ``rings = len(sigmas) - 1``.
ring_radii : `list` of `float` or ``None``, optional
Radius (in pixels) for each ring. Specifying `ring_radii` overrides the
`rings` and `radius` parameters by setting ``rings = len(ring_radii)``
and ``radius = ring_radii[-1]``.
If both sigmas and ring_radii are given, they must satisfy ::
len(ring_radii) == len(sigmas) + 1
since no radius is needed for the centre histogram.
verbose : `bool`
Flag to print Daisy related information.
Returns
-------
daisy : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The ES features image. It has the same type and shape as the input
``pixels``. The output number of channels is
``C = (rings * histograms + 1) * orientations``.
Raises
------
ValueError
len(sigmas)-1 != len(ring_radii)
ValueError
Invalid normalization method.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Daisy: An efficient dense descriptor
applied to wide-baseline stereo", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 32, num. 5, p. 815-830, 2010.
"""
from menpo.external.skimage._daisy import _daisy
# Parse options
if (
sigmas is not None
and ring_radii is not None
and len(sigmas) - 1 != len(ring_radii)
):
raise ValueError("`len(sigmas)-1 != len(ring_radii)`")
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization is None:
normalization = "off"
if normalization not in ["l1", "l2", "daisy", "off"]:
raise ValueError("Invalid normalization method.")
# Compute daisy features
daisy_descriptor = _daisy(
pixels,
step=step,
radius=radius,
rings=rings,
histograms=histograms,
orientations=orientations,
normalization=normalization,
sigmas=sigmas,
ring_radii=ring_radii,
)
# print information
if verbose:
info_str = "Daisy Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], pixels.shape[0]
)
info_str = "{} - Sampling step is {}.\n".format(info_str, step)
info_str = (
"{} - Radius of {} pixels, {} rings and {} histograms "
"with {} orientations.\n".format(
info_str, radius, rings, histograms, orientations
)
)
if not normalization == "off":
info_str = "{} - Using {} normalization.\n".format(info_str, normalization)
else:
info_str = "{} - No normalization emplyed.\n".format(info_str)
info_str = "{}Output image size {}W x {}H x {}.".format(
info_str,
daisy_descriptor.shape[2],
daisy_descriptor.shape[1],
daisy_descriptor.shape[0],
)
print(info_str)
return daisy_descriptor
@imgfeature
def normalize(img, scale_func=None, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixel values via mean centering and an optional scaling. By
default the scaling will be ``1.0``. The ``mode`` parameter selects
whether the normalisation is computed across all pixels in the image or
per-channel.
Parameters
----------
img : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
scale_func : `callable`, optional
Compute the scaling factor. Expects a single parameter and an optional
`axis` keyword argument and will be passed the entire pixel array.
Should return a 1D numpy array of one or more values.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
if scale_func is None:
def scale_func(_, axis=None):
return np.array([1.0])
pixels = img.as_vector(keep_channels=True)
if mode == "all":
centered_pixels = pixels - np.mean(pixels)
scale_factor = scale_func(centered_pixels)
elif mode == "per_channel":
centered_pixels = pixels - np.mean(pixels, axis=1, keepdims=True)
scale_factor = scale_func(centered_pixels, axis=1).reshape([-1, 1])
else:
raise ValueError(
"Supported modes are {{'all', 'per_channel'}} - '{}' "
"is not known".format(mode)
)
zero_denom = (scale_factor == 0).ravel()
any_non_zero = np.any(zero_denom)
if error_on_divide_by_zero and any_non_zero:
raise ValueError("Computed scale factor cannot be 0.0")
elif any_non_zero:
warnings.warn(
"One or more the scale factors are 0.0 and thus these"
"entries will be skipped during normalization."
)
non_zero_denom = ~zero_denom
centered_pixels[non_zero_denom] = (
centered_pixels[non_zero_denom] / scale_factor[non_zero_denom]
)
return img.from_vector(centered_pixels)
else:
return img.from_vector(centered_pixels / scale_factor)
@ndfeature
def normalize_norm(pixels, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit norm. The ``mode``
parameter selects whether the normalisation is computed across all pixels in
the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_norm(x, axis=None):
return np.linalg.norm(x, axis=axis)
return normalize(
pixels,
scale_func=unit_norm,
mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero,
)
@ndfeature
def normalize_std(pixels, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit standard deviation.
The ``mode`` parameter selects whether the normalisation is computed across
all pixels in the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_std(x, axis=None):
return np.std(x, axis=axis)
return normalize(
pixels,
scale_func=unit_std,
mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero,
)
@ndfeature
def normalize_var(pixels, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and normalize according
to the variance.
The ``mode`` parameter selects whether the normalisation is computed across
all pixels in the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_var(x, axis=None):
return np.var(x, axis=axis)
return normalize(
pixels,
scale_func=unit_var,
mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero,
)
@ndfeature
def no_op(pixels):
r"""
A no operation feature - does nothing but return a copy of the pixels
passed in.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A copy of the image that was passed in.
"""
return pixels.copy()
| [
"numpy.abs",
"numpy.median",
"numpy.empty",
"numpy.angle",
"scipy.ndimage.gaussian_filter",
"numpy.std",
"numpy.var",
"menpo.external.skimage._daisy._daisy",
"numpy.any",
"numpy.gradient",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"numpy.mean",
"warnings.warn",
"i... | [((2217, 2257), 'numpy.concatenate', 'np.concatenate', (['grad_per_channel'], {'axis': '(0)'}), '(grad_per_channel, axis=0)\n', (2231, 2257), True, 'import numpy as np\n'), ((3271, 3313), 'numpy.empty', 'np.empty', (['pixels.shape'], {'dtype': 'pixels.dtype'}), '(pixels.shape, dtype=pixels.dtype)\n', (3279, 3313), True, 'import numpy as np\n'), ((5625, 5681), 'numpy.angle', 'np.angle', (['(grad[:n_img_chnls] + 1.0j * grad[n_img_chnls:])'], {}), '(grad[:n_img_chnls] + 1.0j * grad[n_img_chnls:])\n', (5633, 5681), True, 'import numpy as np\n'), ((5721, 5815), 'numpy.empty', 'np.empty', (['(n_img_chnls * feat_chnls, pixels.shape[1], pixels.shape[2])'], {'dtype': 'pixels.dtype'}), '((n_img_chnls * feat_chnls, pixels.shape[1], pixels.shape[2]),\n dtype=pixels.dtype)\n', (5729, 5815), True, 'import numpy as np\n'), ((8500, 8554), 'numpy.abs', 'np.abs', (['(grad[:n_img_chnls] + 1.0j * grad[n_img_chnls:])'], {}), '(grad[:n_img_chnls] + 1.0j * grad[n_img_chnls:])\n', (8506, 8554), True, 'import numpy as np\n'), ((8638, 8740), 'numpy.empty', 'np.empty', (['(pixels.shape[0] * feat_channels, pixels.shape[1], pixels.shape[2])'], {'dtype': 'pixels.dtype'}), '((pixels.shape[0] * feat_channels, pixels.shape[1], pixels.shape[2]\n ), dtype=pixels.dtype)\n', (8646, 8740), True, 'import numpy as np\n'), ((13504, 13682), 'menpo.external.skimage._daisy._daisy', '_daisy', (['pixels'], {'step': 'step', 'radius': 'radius', 'rings': 'rings', 'histograms': 'histograms', 'orientations': 'orientations', 'normalization': 'normalization', 'sigmas': 'sigmas', 'ring_radii': 'ring_radii'}), '(pixels, step=step, radius=radius, rings=rings, histograms=histograms,\n orientations=orientations, normalization=normalization, sigmas=sigmas,\n ring_radii=ring_radii)\n', (13510, 13682), False, 'from menpo.external.skimage._daisy import _daisy\n'), ((17115, 17133), 'numpy.any', 'np.any', (['zero_denom'], {}), '(zero_denom)\n', (17121, 17133), True, 'import numpy as np\n'), ((1655, 1683), 
'numpy.gradient', 'np.gradient', (['g'], {'edge_order': '(1)'}), '(g, edge_order=1)\n', (1666, 1683), True, 'import numpy as np\n'), ((1765, 1820), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['grad_per_dim_per_channel'], {}), '(grad_per_dim_per_channel)\n', (1794, 1820), False, 'import itertools\n'), ((2094, 2141), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['grad_per_channel'], {}), '(grad_per_channel)\n', (2123, 2141), False, 'import itertools\n'), ((3361, 3422), 'scipy.ndimage.gaussian_filter', 'scipy_gaussian_filter', (['pixels[dim]', 'sigma'], {'output': 'output[dim]'}), '(pixels[dim], sigma, output=output[dim])\n', (3382, 3422), True, 'from scipy.ndimage import gaussian_filter as scipy_gaussian_filter\n'), ((5945, 5964), 'numpy.sin', 'np.sin', (['grad_orient'], {}), '(grad_orient)\n', (5951, 5964), True, 'import numpy as np\n'), ((6017, 6040), 'numpy.sin', 'np.sin', (['dbl_grad_orient'], {}), '(dbl_grad_orient)\n', (6023, 6040), True, 'import numpy as np\n'), ((6117, 6136), 'numpy.cos', 'np.cos', (['grad_orient'], {}), '(grad_orient)\n', (6123, 6136), True, 'import numpy as np\n'), ((6177, 6200), 'numpy.cos', 'np.cos', (['dbl_grad_orient'], {}), '(dbl_grad_orient)\n', (6183, 6200), True, 'import numpy as np\n'), ((6246, 6265), 'numpy.sin', 'np.sin', (['grad_orient'], {}), '(grad_orient)\n', (6252, 6265), True, 'import numpy as np\n'), ((6306, 6325), 'numpy.cos', 'np.cos', (['grad_orient'], {}), '(grad_orient)\n', (6312, 6325), True, 'import numpy as np\n'), ((8602, 8621), 'numpy.median', 'np.median', (['grad_abs'], {}), '(grad_abs)\n', (8611, 8621), True, 'import numpy as np\n'), ((19111, 19139), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (19125, 19139), True, 'import numpy as np\n'), ((20697, 20717), 'numpy.std', 'np.std', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (20703, 20717), True, 'import numpy as np\n'), ((22285, 22305), 'numpy.var', 'np.var', (['x'], 
{'axis': 'axis'}), '(x, axis=axis)\n', (22291, 22305), True, 'import numpy as np\n'), ((16526, 16541), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (16534, 16541), True, 'import numpy as np\n'), ((16648, 16663), 'numpy.mean', 'np.mean', (['pixels'], {}), '(pixels)\n', (16655, 16663), True, 'import numpy as np\n'), ((17278, 17402), 'warnings.warn', 'warnings.warn', (['"""One or more the scale factors are 0.0 and thus theseentries will be skipped during normalization."""'], {}), "(\n 'One or more the scale factors are 0.0 and thus theseentries will be skipped during normalization.'\n )\n", (17291, 17402), False, 'import warnings\n'), ((16782, 16820), 'numpy.mean', 'np.mean', (['pixels'], {'axis': '(1)', 'keepdims': '(True)'}), '(pixels, axis=1, keepdims=True)\n', (16789, 16820), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Several baseline e2e simple arithmetic tests."""
from absl import app
from iree.tf.support import tf_test_utils
from iree.tf.support import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
class SimpleArithmeticModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
return a * b
@tf.function(input_signature=[
tf.TensorSpec([128, 3072], tf.float32),
tf.TensorSpec([3072, 256], tf.float32),
])
def simple_matmul(self, a, b):
return tf.matmul(a, b)
class SimpleArithmeticTest(tf_test_utils.TracedModuleTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modules = tf_test_utils.compile_tf_module(SimpleArithmeticModule)
def test_simple_mul(self):
def simple_mul(module):
a = np.array([1., 2., 3., 4.], dtype=np.float32)
b = np.array([400., 5., 6., 7.], dtype=np.float32)
c = module.simple_mul(a, b)
module.simple_mul(a, c)
self.compare_backends(simple_mul, self._modules)
def test_simple_matmul(self):
def simple_matmul(module):
# Note: scaling by a small value to increase numerical stability.
a = tf_utils.uniform((128, 3072)) * 1e-3
b = tf_utils.uniform((3072, 256)) * 1e-3
module.simple_matmul(a, b)
self.compare_backends(simple_matmul, self._modules)
def main(argv):
del argv # Unused
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
tf.test.main()
if __name__ == '__main__':
app.run(main)
| [
"iree.tf.support.tf_utils.uniform",
"iree.tf.support.tf_test_utils.compile_tf_module",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.TensorSpec",
"absl.app.run",
"numpy.array",
"tensorflow.compat.v2.matmul"
] | [((2135, 2149), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (2147, 2149), True, 'import tensorflow.compat.v2 as tf\n'), ((2181, 2194), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2188, 2194), False, 'from absl import app\n'), ((1180, 1195), 'tensorflow.compat.v2.matmul', 'tf.matmul', (['a', 'b'], {}), '(a, b)\n', (1189, 1195), True, 'import tensorflow.compat.v2 as tf\n'), ((1360, 1415), 'iree.tf.support.tf_test_utils.compile_tf_module', 'tf_test_utils.compile_tf_module', (['SimpleArithmeticModule'], {}), '(SimpleArithmeticModule)\n', (1391, 1415), False, 'from iree.tf.support import tf_test_utils\n'), ((2109, 2132), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (2130, 2132), True, 'import tensorflow.compat.v2 as tf\n'), ((1485, 1533), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0]'], {'dtype': 'np.float32'}), '([1.0, 2.0, 3.0, 4.0], dtype=np.float32)\n', (1493, 1533), True, 'import numpy as np\n'), ((1540, 1590), 'numpy.array', 'np.array', (['[400.0, 5.0, 6.0, 7.0]'], {'dtype': 'np.float32'}), '([400.0, 5.0, 6.0, 7.0], dtype=np.float32)\n', (1548, 1590), True, 'import numpy as np\n'), ((884, 914), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[4]', 'tf.float32'], {}), '([4], tf.float32)\n', (897, 914), True, 'import tensorflow.compat.v2 as tf\n'), ((922, 952), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[4]', 'tf.float32'], {}), '([4], tf.float32)\n', (935, 952), True, 'import tensorflow.compat.v2 as tf\n'), ((1045, 1083), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[128, 3072]', 'tf.float32'], {}), '([128, 3072], tf.float32)\n', (1058, 1083), True, 'import tensorflow.compat.v2 as tf\n'), ((1091, 1129), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[3072, 256]', 'tf.float32'], {}), '([3072, 256], tf.float32)\n', (1104, 1129), True, 'import tensorflow.compat.v2 as tf\n'), ((1852, 1881), 'iree.tf.support.tf_utils.uniform', 'tf_utils.uniform', 
(['(128, 3072)'], {}), '((128, 3072))\n', (1868, 1881), False, 'from iree.tf.support import tf_utils\n'), ((1899, 1928), 'iree.tf.support.tf_utils.uniform', 'tf_utils.uniform', (['(3072, 256)'], {}), '((3072, 256))\n', (1915, 1928), False, 'from iree.tf.support import tf_utils\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:28:36 2020
@author: routhier
"""
import numpy as np
import sys
import six
try:
import h5py
HDF5_OBJECT_HEADER_LIMIT = 64512
except ImportError:
h5py = None
if sys.version_info[0] == 3:
import pickle
else:
import cPickle as pickle
class H5Dict(object):
""" A dict-like wrapper around h5py groups (or dicts).
This allows us to have a single serialization logic
for both pickling and saving to disk.
Note: This is not intended to be a generic wrapper.
There are lot of edge cases which have been hardcoded,
and makes sense only in the context of model serialization/
deserialization.
# Arguments
path: Either a string (path on disk), a Path, a dict, or a HDF5 Group.
mode: File open mode (one of `{"a", "r", "w"}`).
"""
def __init__(self, path, mode='a'):
if isinstance(path, h5py.Group):
self.data = path
self._is_file = False
elif isinstance(path, six.string_types) or _is_path_instance(path):
self.data = h5py.File(path, mode=mode)
self._is_file = True
elif isinstance(path, dict):
self.data = path
self._is_file = False
if mode == 'w':
self.data.clear()
# Flag to check if a dict is user defined data or a sub group:
self.data['_is_group'] = True
else:
raise TypeError('Required Group, str, Path or dict. '
'Received: {}.'.format(type(path)))
self.read_only = mode == 'r'
@staticmethod
def is_supported_type(path):
"""Check if `path` is of supported type for instantiating a `H5Dict`"""
return (
isinstance(path, h5py.Group) or
isinstance(path, dict) or
isinstance(path, six.string_types) or
_is_path_instance(path)
)
def __setitem__(self, attr, val):
if self.read_only:
raise ValueError('Cannot set item in read-only mode.')
is_np = type(val).__module__ == np.__name__
if isinstance(self.data, dict):
if isinstance(attr, bytes):
attr = attr.decode('utf-8')
if is_np:
self.data[attr] = pickle.dumps(val)
# We have to remember to unpickle in __getitem__
self.data['_{}_pickled'.format(attr)] = True
else:
self.data[attr] = val
return
if isinstance(self.data, h5py.Group) and attr in self.data:
raise KeyError('Cannot set attribute. '
'Group with name "{}" exists.'.format(attr))
if is_np:
dataset = self.data.create_dataset(attr, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
dataset[()] = val
else:
dataset[:] = val
elif isinstance(val, (list, tuple)):
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in val if len(x) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError('The following attributes cannot be saved to '
'HDF5 file because they are larger than '
'%d bytes: %s' % (HDF5_OBJECT_HEADER_LIMIT,
', '.join(bad_attributes)))
if (val and sys.version_info[0] == 3 and isinstance(
val[0], six.string_types)):
# convert to bytes
val = [x.encode('utf-8') for x in val]
data_npy = np.asarray(val)
num_chunks = 1
chunked_data = np.array_split(data_npy, num_chunks)
# This will never loop forever thanks to the test above.
is_too_big = lambda x: x.nbytes > HDF5_OBJECT_HEADER_LIMIT
while any(map(is_too_big, chunked_data)):
num_chunks += 1
chunked_data = np.array_split(data_npy, num_chunks)
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(chunked_data):
self.data.attrs['%s%d' % (attr, chunk_id)] = chunk_data
else:
self.data.attrs[attr] = val
else:
self.data.attrs[attr] = val
def __getitem__(self, attr):
if isinstance(self.data, dict):
if isinstance(attr, bytes):
attr = attr.decode('utf-8')
if attr in self.data:
val = self.data[attr]
if isinstance(val, dict) and val.get('_is_group'):
val = H5Dict(val)
elif '_{}_pickled'.format(attr) in self.data:
val = pickle.loads(val)
return val
else:
if self.read_only:
raise ValueError('Cannot create group in read-only mode.')
val = {'_is_group': True}
self.data[attr] = val
return H5Dict(val)
if attr in self.data.attrs:
val = self.data.attrs[attr]
if type(val).__module__ == np.__name__:
if val.dtype.type == np.string_:
val = val.tolist()
elif attr in self.data:
val = self.data[attr]
if isinstance(val, h5py.Dataset):
val = np.asarray(val)
else:
val = H5Dict(val)
else:
# could be chunked
chunk_attr = '%s%d' % (attr, 0)
is_chunked = chunk_attr in self.data.attrs
if is_chunked:
val = []
chunk_id = 0
while chunk_attr in self.data.attrs:
chunk = self.data.attrs[chunk_attr]
val.extend([x.decode('utf8') for x in chunk])
chunk_id += 1
chunk_attr = '%s%d' % (attr, chunk_id)
else:
if self.read_only:
raise ValueError('Cannot create group in read-only mode.')
val = H5Dict(self.data.create_group(attr))
return val
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def iter(self):
return iter(self.data)
def __getattr__(self, attr):
def wrapper(f):
def h5wrapper(*args, **kwargs):
out = f(*args, **kwargs)
if isinstance(self.data, type(out)):
return H5Dict(out)
else:
return out
return h5wrapper
return wrapper(getattr(self.data, attr))
def close(self):
if isinstance(self.data, h5py.Group):
self.data.file.flush()
if self._is_file:
self.data.close()
def update(self, *args):
if isinstance(self.data, dict):
self.data.update(*args)
raise NotImplementedError
def __contains__(self, key):
if isinstance(self.data, dict):
return key in self.data
else:
return (key in self.data) or (key in self.data.attrs)
def get(self, key, default=None):
if key in self:
return self[key]
return default
def __enter__(self):
return self
def __exit__(self):
self.close()
def _is_path_instance(path):
# We can't use isinstance here because it would require
# us to add pathlib2 to the Python 2 dependencies.
class_name = type(path).__name__
return class_name == 'PosixPath' or class_name == 'WindowsPath'
| [
"h5py.File",
"cPickle.loads",
"numpy.asarray",
"cPickle.dumps",
"numpy.array_split"
] | [((1116, 1142), 'h5py.File', 'h5py.File', (['path'], {'mode': 'mode'}), '(path, mode=mode)\n', (1125, 1142), False, 'import h5py\n'), ((2328, 2345), 'cPickle.dumps', 'pickle.dumps', (['val'], {}), '(val)\n', (2340, 2345), True, 'import cPickle as pickle\n'), ((3926, 3941), 'numpy.asarray', 'np.asarray', (['val'], {}), '(val)\n', (3936, 3941), True, 'import numpy as np\n'), ((3997, 4033), 'numpy.array_split', 'np.array_split', (['data_npy', 'num_chunks'], {}), '(data_npy, num_chunks)\n', (4011, 4033), True, 'import numpy as np\n'), ((4292, 4328), 'numpy.array_split', 'np.array_split', (['data_npy', 'num_chunks'], {}), '(data_npy, num_chunks)\n', (4306, 4328), True, 'import numpy as np\n'), ((5687, 5702), 'numpy.asarray', 'np.asarray', (['val'], {}), '(val)\n', (5697, 5702), True, 'import numpy as np\n'), ((5045, 5062), 'cPickle.loads', 'pickle.loads', (['val'], {}), '(val)\n', (5057, 5062), True, 'import cPickle as pickle\n')] |
import os
import os.path as osp
import mmcv
import numpy as np
from PIL import Image
def norm_angle(angle, range=[-np.pi / 4, np.pi]):
return (angle - range[0]) % range[1] + range[0]
def poly_to_rotated_box_single(poly):
"""
poly:[x0,y0,x1,y1,x2,y2,x3,y3]
to
rotated_box:[x_ctr,y_ctr,w,h,angle]
"""
poly = np.array(poly[:8], dtype=np.float32)
pt1 = (poly[0], poly[1])
pt2 = (poly[2], poly[3])
pt3 = (poly[4], poly[5])
pt4 = (poly[6], poly[7])
edge1 = np.sqrt((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) +
(pt1[1] - pt2[1]) * (pt1[1] - pt2[1]))
edge2 = np.sqrt((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) +
(pt2[1] - pt3[1]) * (pt2[1] - pt3[1]))
width = max(edge1, edge2)
height = min(edge1, edge2)
angle = 0
if edge1 > edge2:
angle = np.arctan2(
np.float(pt2[1] - pt1[1]), np.float(pt2[0] - pt1[0]))
elif edge2 >= edge1:
angle = np.arctan2(
np.float(pt4[1] - pt1[1]), np.float(pt4[0] - pt1[0]))
angle = norm_angle(angle)
x_ctr = np.float(pt1[0] + pt3[0]) / 2
y_ctr = np.float(pt1[1] + pt3[1]) / 2
rotated_box = np.array([x_ctr, y_ctr, width, height, angle]).astype('float16')
return rotated_box
def parse_ann_info(label_base_path, img_name, label_ids):
lab_path = osp.join(label_base_path, img_name + '.txt')
bboxes, labels, bboxes_ignore, labels_ignore = [], [], [], []
with open(lab_path, 'r') as f:
for ann_line in f.readlines():
ann_line = ann_line.strip().split(',')
bbox = [float(ann_line[i]) for i in range(8)]
# 8 point to 5 point xywha
bbox = tuple(poly_to_rotated_box_single(bbox).tolist())
# class_name = ann_line[9]
class_name = ann_line[9]
# difficult = int(ann_line[9])
difficult = 0
# ignore difficult =2
if difficult == 0:
bboxes.append(bbox)
labels.append(label_ids[class_name])
elif difficult == 1:
bboxes_ignore.append(bbox)
labels_ignore.append(label_ids[class_name])
return bboxes, labels, bboxes_ignore, labels_ignore
def convert_icdar_to_mmdet(src_path, out_path, label_ids, trainval=True, filter_empty_gt=True, ext='.png'):
"""Generate .pkl format annotation that is consistent with mmdet.
Args:
src_path: dataset path containing images and labelTxt folders.
out_path: output pkl file path
trainval: trainval or test
"""
# img_path = os.path.join(src_path, 'images')
img_path = os.path.join(src_path, 'data')
label_path = os.path.join(src_path, 'labelTxt')
img_lists = os.listdir(img_path)
data_dict = []
for id, img in enumerate(img_lists):
img_info = {}
img_name = osp.splitext(img)[0]
label = os.path.join(label_path, img_name + '.txt')
img = Image.open(osp.join(img_path, img))
img_info['filename'] = img_name + ext
img_info['height'] = img.height
img_info['width'] = img.width
if trainval:
if not os.path.exists(label):
print('Label:' + img_name + '.txt' + ' Not Exist')
continue
# filter images without gt to speed up training
if filter_empty_gt & (osp.getsize(label) == 0):
continue
bboxes, labels, bboxes_ignore, labels_ignore = parse_ann_info(label_path, img_name, label_ids=label_ids)
ann = {}
ann['bboxes'] = np.array(bboxes, dtype=np.float32)
ann['labels'] = np.array(labels, dtype=np.int64)
ann['bboxes_ignore'] = np.array(bboxes_ignore, dtype=np.float32)
ann['labels_ignore'] = np.array(labels_ignore, dtype=np.int64)
img_info['ann'] = ann
data_dict.append(img_info)
mmcv.dump(data_dict, out_path)
if __name__ == '__main__':
class_name = ['name', 'sex', 'birthday', 'address', 'number', 'authority', 'validity', 'nation']
label_ids = {name: i + 1 for i, name in enumerate(class_name)}
root = '/mnt/data/rz/data/idCard/v2'
out_path = '/mnt/data/rz/data/idCard/v2/trainval_idCard.pkl'
# the icdar format of ocr:x1,y1,x2,y2,x3,y3,x4,y4 \t class_name \t difficult 0/1(1:will be ignore)
# convert_icdar_to_mmdet('data/dota_1024/trainval_split',
# 'data/dota_1024/trainval_split/trainval_s2anet.pkl', label_ids=label_ids)
convert_icdar_to_mmdet(root, out_path, label_ids=label_ids, ext='.jpg')
print('done!') | [
"os.path.getsize",
"os.path.exists",
"numpy.float",
"numpy.array",
"mmcv.dump",
"os.path.splitext",
"os.path.join",
"os.listdir",
"numpy.sqrt"
] | [((339, 375), 'numpy.array', 'np.array', (['poly[:8]'], {'dtype': 'np.float32'}), '(poly[:8], dtype=np.float32)\n', (347, 375), True, 'import numpy as np\n'), ((506, 596), 'numpy.sqrt', 'np.sqrt', (['((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[1]) * (pt1[1] - pt2[1]))'], {}), '((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[1]) * (pt1[1] -\n pt2[1]))\n', (513, 596), True, 'import numpy as np\n'), ((625, 715), 'numpy.sqrt', 'np.sqrt', (['((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[1]) * (pt2[1] - pt3[1]))'], {}), '((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[1]) * (pt2[1] -\n pt3[1]))\n', (632, 715), True, 'import numpy as np\n'), ((1340, 1384), 'os.path.join', 'osp.join', (['label_base_path', "(img_name + '.txt')"], {}), "(label_base_path, img_name + '.txt')\n", (1348, 1384), True, 'import os.path as osp\n'), ((2640, 2670), 'os.path.join', 'os.path.join', (['src_path', '"""data"""'], {}), "(src_path, 'data')\n", (2652, 2670), False, 'import os\n'), ((2688, 2722), 'os.path.join', 'os.path.join', (['src_path', '"""labelTxt"""'], {}), "(src_path, 'labelTxt')\n", (2700, 2722), False, 'import os\n'), ((2739, 2759), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (2749, 2759), False, 'import os\n'), ((3905, 3935), 'mmcv.dump', 'mmcv.dump', (['data_dict', 'out_path'], {}), '(data_dict, out_path)\n', (3914, 3935), False, 'import mmcv\n'), ((1088, 1113), 'numpy.float', 'np.float', (['(pt1[0] + pt3[0])'], {}), '(pt1[0] + pt3[0])\n', (1096, 1113), True, 'import numpy as np\n'), ((1130, 1155), 'numpy.float', 'np.float', (['(pt1[1] + pt3[1])'], {}), '(pt1[1] + pt3[1])\n', (1138, 1155), True, 'import numpy as np\n'), ((2899, 2942), 'os.path.join', 'os.path.join', (['label_path', "(img_name + '.txt')"], {}), "(label_path, img_name + '.txt')\n", (2911, 2942), False, 'import os\n'), ((871, 896), 'numpy.float', 'np.float', (['(pt2[1] - pt1[1])'], {}), '(pt2[1] - pt1[1])\n', (879, 896), True, 'import numpy as np\n'), ((898, 
923), 'numpy.float', 'np.float', (['(pt2[0] - pt1[0])'], {}), '(pt2[0] - pt1[0])\n', (906, 923), True, 'import numpy as np\n'), ((1178, 1224), 'numpy.array', 'np.array', (['[x_ctr, y_ctr, width, height, angle]'], {}), '([x_ctr, y_ctr, width, height, angle])\n', (1186, 1224), True, 'import numpy as np\n'), ((2862, 2879), 'os.path.splitext', 'osp.splitext', (['img'], {}), '(img)\n', (2874, 2879), True, 'import os.path as osp\n'), ((2968, 2991), 'os.path.join', 'osp.join', (['img_path', 'img'], {}), '(img_path, img)\n', (2976, 2991), True, 'import os.path as osp\n'), ((3583, 3617), 'numpy.array', 'np.array', (['bboxes'], {'dtype': 'np.float32'}), '(bboxes, dtype=np.float32)\n', (3591, 3617), True, 'import numpy as np\n'), ((3646, 3678), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int64'}), '(labels, dtype=np.int64)\n', (3654, 3678), True, 'import numpy as np\n'), ((3714, 3755), 'numpy.array', 'np.array', (['bboxes_ignore'], {'dtype': 'np.float32'}), '(bboxes_ignore, dtype=np.float32)\n', (3722, 3755), True, 'import numpy as np\n'), ((3791, 3830), 'numpy.array', 'np.array', (['labels_ignore'], {'dtype': 'np.int64'}), '(labels_ignore, dtype=np.int64)\n', (3799, 3830), True, 'import numpy as np\n'), ((990, 1015), 'numpy.float', 'np.float', (['(pt4[1] - pt1[1])'], {}), '(pt4[1] - pt1[1])\n', (998, 1015), True, 'import numpy as np\n'), ((1017, 1042), 'numpy.float', 'np.float', (['(pt4[0] - pt1[0])'], {}), '(pt4[0] - pt1[0])\n', (1025, 1042), True, 'import numpy as np\n'), ((3157, 3178), 'os.path.exists', 'os.path.exists', (['label'], {}), '(label)\n', (3171, 3178), False, 'import os\n'), ((3366, 3384), 'os.path.getsize', 'osp.getsize', (['label'], {}), '(label)\n', (3377, 3384), True, 'import os.path as osp\n')] |
# IMPORTS
# ------------------------------------------------------------------------------
import os
import sys
# DataScience
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
# Custom Packages
# Make the repository root importable when this script is run from a
# subdirectory (e.g. a notebook folder): prepend the CWD's parent to sys.path.
nb_dir = os.path.split(os.getcwd())[0]
if nb_dir not in sys.path:
    sys.path.append(nb_dir)
from dataset_manager.dataset_manager import DatasetManager
from neural_network_lab.model_preset.logger import Logger
# Import Your Model Setup
from neural_network_lab.ann_classification import ModelNeuralNetwork
# INITIALIZE DATASET
# ------------------------------------------------------------------------------
# Data Parameters
currency_pair = 'USD/JPY'
data_postfix = '12-16'
# Resampling interval handed to DatasetManager.resample (1-hour candles)
time_frame = '1H'
dm = DatasetManager(currency_pair, data_postfix).resample(time_frame)
dm.df.reset_index(drop=True, inplace=True)
# Keep a pristine copy in memory so the indicator pipeline below can start
# from raw data again via dm.restore_df()
dm.save_df_copy_into_memory()
df = dm.df
# IMPORT NEURAL NETWORK
# ------------------------------------------------------------------------------
model = ModelNeuralNetwork(data_manager=dm)
model.models_folder = 'trained_models'
# EWMA span used as the prediction target (dm.ewma(model.predict_ma) below)
model.predict_ma: int = 40
# presumably the look-back window length fed to create_train_vectors — TODO confirm
model.n_past: int = 10
# Prediction horizon in bars: labels compare price n_future steps ahead
model.n_future: int = 3
model.model_task: str = 'classification'
model.model_postfix: str = ''
# INDICATORS
# ------------------------------------------------------------------------------
# Rebuild the frame from the pristine copy, then attach the indicators.
dm.restore_df()
# The target EWMA goes first; then the standard spans, skipping a duplicate
# when the target span is one of them.
dm.ewma(model.predict_ma)
for span in (15, 20, 30, 40, 60):
    if model.predict_ma != span:
        dm.ewma(span)
dm.rsi_indicator(25)
dm.stochastic_oscilator(25, 3, 3)
dm.set_indicators(target=model.model_task)
# Derived Quantities
# Ratio of the target EWMA to its own value n_future bars earlier, plus its log
dm.df['past_price_regression'] = dm.df[dm.mean_indicators[0]] / dm.df[dm.mean_indicators[0]].shift(
    model.n_future)
dm.df['past_log_regression'] = np.log(dm.df['past_price_regression'])
for mean_average in dm.mean_indicators:
    # First difference and log-return of every moving average; the column
    # suffix reuses the last two characters of the indicator name
    dm.df['mean_diff_{}'.format(mean_average[-2:])] = dm.df[mean_average] - dm.df[
        mean_average].shift(1)
    dm.df['mean_ret_{}'.format(mean_average[-2:])] = np.log(
        dm.df[mean_average] / dm.df[mean_average].shift(1))
# CLASSIFICATION VALUES
# Binary label: 1 when the target EWMA is higher n_future bars ahead
dm.df['future_price_regression'] = dm.df[dm.mean_indicators[0]].shift(-model.n_future) / dm.df[
    dm.mean_indicators[0]]
dm.df[model.model_task] = np.where(dm.df['future_price_regression'] > 1, 1, 0)
# Drop unnecessary values
dm.df.drop(['low', 'high', 'open'], axis=1, inplace=True)
dm.df.drop(['%d', 'past_price_regression', 'future_price_regression'], axis=1, inplace=True)
# dm.df.drop(model.mean_indicators, axis=1, inplace=True)
# Trim rows at both ends — presumably indicator warm-up NaNs at the start and
# shifted-label NaNs at the end (TODO confirm the 30/-5 margins)
dm.df = dm.df.iloc[30:-5]
dm.df.reset_index(drop=True, inplace=True)
dm.set_indicators(target=model.model_task)
df = dm.df
# Test/Train split
df_train, df_test, df_test_close = dm.test_train_split(model)
# NORMALIZATION
# ------------------------------------------------------------------------------
# Fit the scaler on the training split only; the test split is transformed
# with the training statistics, avoiding look-ahead leakage.
scaler = StandardScaler()
scaled_df_train = scaler.fit_transform(df_train[dm.mean_indicators + dm.indicators])
scaled_df_test = scaler.transform(df_test[dm.mean_indicators + dm.indicators])
# CREATE INPUT VECTORS
# ------------------------------------------------------------------------------
x_train, y_train = model.create_train_vectors(df_train, scaled_df_train)
x_test, y_test, y_test_price = model.create_test_vectors(df_test, scaled_df_test, df_test_close)
# TRAIN NETWORK
# ------------------------------------------------------------------------------
trained_model, training_history = model.train_network(x_train, y_train)
# Plot Training Progress of Error
model.plot_training_loss()
# Plot Training Progress of Accuracy
model.plot_training_metric()
# Drop config names no longer needed after training
del (currency_pair, data_postfix, nb_dir, time_frame)
# MAKE PREDICTION
# ------------------------------------------------------------------------------
# Load Best Model
# (presumably the best checkpoint saved by train_network — TODO confirm)
classifier = model.load_network()
# Make Predictions
predictions_train = classifier.predict(x_train)
predictions_test = classifier.predict(x_test)
# Set values for evaluation
actual_train = y_train
actual_test = y_test
# CREATE SETS FOR EVALUATION
# Columns: Actual, Prediction, Close Price
# ------------------------------------------------------------------------------
# TRAIN Evaluation Set
df_train_eval = model.create_train_eval_set(actual_train, predictions_train)
# VALIDATION Evaluation Set
# NOTE(review): built from the same (actual_train, predictions_train) pair as
# the train set — presumably the model object carves out its validation slice
df_val_eval = model.create_val_eval_set(actual_train, predictions_train)
# TEST Evaluation Set
df_test_eval = model.create_test_eval_set(actual_test, predictions_test, y_test_price)
# ACCURACY EVALUATION
# ------------------------------------------------------------------------------
# Accuracy with the default 0.5 decision boundary, stored on the model
model.test_score = model.calc_acc(df_test_eval.copy(), origin=0.5, actual_col='actual',
                                  prediction_col='prediction')
# EVALUATION METRICS
# ------------------------------------------------------------------------------
# Single import instead of importing classification_report twice
from sklearn.metrics import classification_report, confusion_matrix
# CONFUSION MATRIX - Test set
# (previously the matrix was computed and discarded; print it so it is visible)
print(confusion_matrix(actual_test, predictions_test.round()))
# EVALUATION REPORT - Train set
print(classification_report(actual_train, predictions_train.round()))
# EVALUATION REPORT - Test set
print(classification_report(actual_test, predictions_test.round()))
# TRADING STRATEGIES OPTIMIZATION
# ------------------------------------------------------------------------------
# NN Trading Threshold Optimization on TRAIN Set
# Sweep the decision threshold; drawdown is skipped during the sweep because
# computing it for every candidate is very time consuming.
strategies = [
    model.prediction_strategy(df_train_eval.copy(), origin=0.5,
                              threshold=threshold, calc_drawdown=False)
    for threshold in np.linspace(0, 0.45, 61)
]
df_strategies = pd.DataFrame(data=strategies,
                             columns=['threshold', 'pip_profit', 'sharpe', 'winrate',
                                      'drawdown', 'fees', 'trades_n', ])
# SAVE Strategies into CSV
df_strategies.to_csv(f'{model.models_folder}/{model.model_name}/pred_strategy_optimization.csv',
                     encoding='utf-8',
                     index=False)
# SAVE threshold parameter of the best strategy
model.set_pred_best_threshold(df_strategies)
# PLOT threshold optimization
model.plot_threshold_optimization(df_strategies, plot_name='threshold_nn_pred_optimization')
# MACD Strategy optimization
# Same threshold sweep for the MACD baseline (drawdown skipped — slow).
strategies = [
    model.macd_strategy(df_train_eval.copy(), origin=0.5,
                        threshold=threshold, calc_drawdown=False)
    for threshold in np.linspace(0, 0.45, 61)
]
df_strategies = pd.DataFrame(data=strategies,
                             columns=['threshold', 'pip_profit', 'sharpe', 'winrate', 'drawdown',
                                      'fees', 'trades_n'])
# SAVE Strategies into CSV
df_strategies.to_csv('trained_models/{}/macd_strategy_optimization.csv'.format(model.model_name),
                     encoding='utf-8',
                     index=False)
# SAVE threshold parameter of the best strategy
model.set_macd_best_threshold(df_strategies)
# PLOT threshold optimization
model.plot_threshold_optimization(df_strategies, 'threshold_macd_optimization')
# TRADING STRATEGIES EVALUATION
# Calculating strategies with best threshold parameters
# NOTE(review): the six stanzas below are near-identical (strategy x data set);
# a helper function would remove the duplication.
best_strategies_evaluations = []
# TEST SET EVALUATION
# ------------------------------------------------------------------------------
# PREDICTION STRATEGY
df_eval = df_test_eval.copy()
strategy = model.prediction_strategy(df_eval, origin=0.5,
                                     threshold=model.nn_pred_strategy_best_threshold)
# Tag the result row with the strategy/data-set it belongs to
strategy.insert(0, 'test_nn_pred')
# SAVE to list of strategies
best_strategies_evaluations.append(strategy)
# PLOT Returns
model.plot_cumulative_returns(df_eval, 'nn_pred_test_returns')
# SAVE to parameters for Logger
strategy_dict = model.prediction_strategy(df=df_test_eval.copy(), origin=0.5,
                                          threshold=model.nn_pred_strategy_best_threshold,
                                          form='dict')
model.set_nn_pred_strategy_parameters(strategy_dict)
# MACD STRATEGY
df_eval = df_test_eval.copy()
strategy = model.macd_strategy(df_eval, origin=0.5, threshold=model.macd_strategy_best_threshold)
strategy.insert(0, 'test_macd')
# SAVE to list of strategies
best_strategies_evaluations.append(strategy)
# PLOT Returns
model.plot_cumulative_returns(df_eval, 'macd_test_returns')
# SAVE to parameters for Logger
strategy_dict = model.macd_strategy(df=df_test_eval.copy(), origin=0.5,
                                    threshold=model.macd_strategy_best_threshold, form='dict')
model.set_macd_strategy_parameters(strategy_dict)
# TRAIN SET EVALUATION
# ------------------------------------------------------------------------------
# PREDICTION STRATEGY
df_eval = df_train_eval.copy()
strategy = model.prediction_strategy(df_eval, origin=0.5,
                                     threshold=model.nn_pred_strategy_best_threshold)
strategy.insert(0, 'train_nn_pred')
# SAVE to list of strategies
best_strategies_evaluations.append(strategy)
# PLOT Returns
model.plot_cumulative_returns(df_eval, 'nn_pred_train_returns')
# SAVE to parameters for Logger
model.nn_pred_train_pip_return = model.get_cumulative_pip_return(df_eval)
# MACD STRATEGY
df_eval = df_train_eval.copy()
strategy = model.macd_strategy(df_eval, origin=0.5, threshold=model.macd_strategy_best_threshold)
strategy.insert(0, 'train_macd')
# SAVE to list of strategies
best_strategies_evaluations.append(strategy)
# PLOT Returns
model.plot_cumulative_returns(df_eval, 'macd_train_returns')
# SAVE to parameters for Logger
model.macd_strategy_train_pip_return = model.get_cumulative_pip_return(df_eval)
# VALIDATION SET EVALUATION
# ------------------------------------------------------------------------------
# PREDICTION STRATEGY
df_eval = df_val_eval.copy()
strategy = model.prediction_strategy(df_eval, origin=0.5,
                                     threshold=model.nn_pred_strategy_best_threshold)
strategy.insert(0, 'val_nn_pred')
# SAVE to list of strategies
best_strategies_evaluations.append(strategy)
# PLOT Returns
model.plot_cumulative_returns(df_eval, 'nn_pred_val_returns')
# SAVE to parameters for Logger
model.nn_pred_val_pip_return = model.get_cumulative_pip_return(df_eval)
# MACD STRATEGY
df_eval = df_val_eval.copy()
strategy = model.macd_strategy(df_eval, origin=0.5, threshold=model.macd_strategy_best_threshold)
strategy.insert(0, 'val_macd')
# SAVE to list of strategies
best_strategies_evaluations.append(strategy)
# PLOT Returns
model.plot_cumulative_returns(df_eval, 'macd_val_returns')
# SAVE to parameters for Logger
model.macd_strategy_val_pip_return = model.get_cumulative_pip_return(df_eval)
# EXPORT INFORMATION
# ------------------------------------------------------------------------------
# Results of best strategy evaluation
df_strategies_eval = pd.DataFrame(data=best_strategies_evaluations,
                                  columns=['type', 'threshold', 'pip_profit', 'sharpe', 'winrate',
                                           'drawdown', 'fees', 'trades_n'])
# Export Results to CSV
df_strategies_eval.to_csv(
    f'{model.models_folder}/{model.model_name}/best_strategies_evaluation.csv',
    encoding='utf-8', index=False)
# LOGGER
# ------------------------------------------------------------------------------
# Init logger
logger = Logger()
logger.set_model(model)
logger.set_data_manager(dm)
# Log model parameters
logger.log_model_info()
| [
"pandas.DataFrame",
"sys.path.append",
"dataset_manager.dataset_manager.DatasetManager",
"sklearn.preprocessing.StandardScaler",
"numpy.log",
"os.getcwd",
"numpy.where",
"numpy.linspace",
"neural_network_lab.ann_classification.ModelNeuralNetwork",
"neural_network_lab.model_preset.logger.Logger"
] | [((996, 1031), 'neural_network_lab.ann_classification.ModelNeuralNetwork', 'ModelNeuralNetwork', ([], {'data_manager': 'dm'}), '(data_manager=dm)\n', (1014, 1031), False, 'from neural_network_lab.ann_classification import ModelNeuralNetwork\n'), ((1839, 1877), 'numpy.log', 'np.log', (["dm.df['past_price_regression']"], {}), "(dm.df['past_price_regression'])\n", (1845, 1877), True, 'import numpy as np\n'), ((2327, 2379), 'numpy.where', 'np.where', (["(dm.df['future_price_regression'] > 1)", '(1)', '(0)'], {}), "(dm.df['future_price_regression'] > 1, 1, 0)\n", (2335, 2379), True, 'import numpy as np\n'), ((2928, 2944), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2942, 2944), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5426, 5450), 'numpy.linspace', 'np.linspace', (['(0)', '(0.45)', '(61)'], {}), '(0, 0.45, 61)\n', (5437, 5450), True, 'import numpy as np\n'), ((5735, 5858), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'strategies', 'columns': "['threshold', 'pip_profit', 'sharpe', 'winrate', 'drawdown', 'fees', 'trades_n'\n ]"}), "(data=strategies, columns=['threshold', 'pip_profit', 'sharpe',\n 'winrate', 'drawdown', 'fees', 'trades_n'])\n", (5747, 5858), True, 'import pandas as pd\n'), ((6400, 6424), 'numpy.linspace', 'np.linspace', (['(0)', '(0.45)', '(61)'], {}), '(0, 0.45, 61)\n', (6411, 6424), True, 'import numpy as np\n'), ((6662, 6785), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'strategies', 'columns': "['threshold', 'pip_profit', 'sharpe', 'winrate', 'drawdown', 'fees', 'trades_n'\n ]"}), "(data=strategies, columns=['threshold', 'pip_profit', 'sharpe',\n 'winrate', 'drawdown', 'fees', 'trades_n'])\n", (6674, 6785), True, 'import pandas as pd\n'), ((10966, 11114), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'best_strategies_evaluations', 'columns': "['type', 'threshold', 'pip_profit', 'sharpe', 'winrate', 'drawdown', 'fees',\n 'trades_n']"}), "(data=best_strategies_evaluations, 
columns=['type', 'threshold',\n 'pip_profit', 'sharpe', 'winrate', 'drawdown', 'fees', 'trades_n'])\n", (10978, 11114), True, 'import pandas as pd\n'), ((11468, 11476), 'neural_network_lab.model_preset.logger.Logger', 'Logger', ([], {}), '()\n', (11474, 11476), False, 'from neural_network_lab.model_preset.logger import Logger\n'), ((303, 326), 'sys.path.append', 'sys.path.append', (['nb_dir'], {}), '(nb_dir)\n', (318, 326), False, 'import sys\n'), ((256, 267), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (265, 267), False, 'import os\n'), ((733, 776), 'dataset_manager.dataset_manager.DatasetManager', 'DatasetManager', (['currency_pair', 'data_postfix'], {}), '(currency_pair, data_postfix)\n', (747, 776), False, 'from dataset_manager.dataset_manager import DatasetManager\n')] |
'''
Author: Dr. <NAME> <<EMAIL>>
This package is distributed under New BSD license.
'''
from __future__ import division
import warnings
import numpy as np
from smt.surrogate_models.krg_based import KrgBased
from smt.utils.kriging_utils import componentwise_distance_PLS, ge_compute_pls
"""
The KPLS class.
"""
class GEKPLS(KrgBased):
    """
    Gradient-Enhanced Kriging combined with Partial Least Squares (GE-KPLS).
    """

    def _initialize(self):
        """Register GEKPLS-specific options and declare supported features."""
        super(GEKPLS, self)._initialize()
        declare = self.options.declare
        declare('xlimits', types=np.ndarray,
                desc='Lower/upper bounds in each dimension - ndarray [nx, 2]')
        declare('n_comp', 1, types=int, desc='Number of principal components')
        declare('theta0', [1e-2], types=(list, np.ndarray), desc='Initial hyperparameters')
        declare('delta_x', 1e-4, types=(int, float), desc='Step used in the FOTA')
        declare('extra_points', 0, types=int, desc='Number of extra points per training point')
        self.supports['training_derivatives'] = True
        self.name = 'GEKPLS'

    def _compute_pls(self, X, y):
        """Compute the PLS coefficients from values and derivatives, and
        append the first-order-Taylor extra points to the training set."""
        if 0 in self.training_points[None]:
            opts = self.options
            self.coeff_pls, X_extra, y_extra = ge_compute_pls(
                X.copy(), y.copy(), opts['n_comp'], self.training_points,
                opts['delta_x'], opts['xlimits'], opts['extra_points'])
            n_extra = opts['extra_points']
            if n_extra != 0:
                # every training point now contributes n_extra extra samples
                self.nt *= n_extra + 1
            X = np.vstack((X, X_extra))
            y = np.vstack((y, y_extra))
        return X, y

    def _componentwise_distance(self, dx, opt=0):
        """Componentwise distance evaluated in the PLS-reduced space."""
        return componentwise_distance_PLS(dx, self.options['corr'].__name__,
                                          self.options['n_comp'], self.coeff_pls)
| [
"smt.utils.kriging_utils.componentwise_distance_PLS",
"numpy.vstack"
] | [((1604, 1710), 'smt.utils.kriging_utils.componentwise_distance_PLS', 'componentwise_distance_PLS', (['dx', "self.options['corr'].__name__", "self.options['n_comp']", 'self.coeff_pls'], {}), "(dx, self.options['corr'].__name__, self.options[\n 'n_comp'], self.coeff_pls)\n", (1630, 1710), False, 'from smt.utils.kriging_utils import componentwise_distance_PLS, ge_compute_pls\n'), ((1466, 1484), 'numpy.vstack', 'np.vstack', (['(X, XX)'], {}), '((X, XX))\n', (1475, 1484), True, 'import numpy as np\n'), ((1504, 1522), 'numpy.vstack', 'np.vstack', (['(y, yy)'], {}), '((y, yy))\n', (1513, 1522), True, 'import numpy as np\n')] |
"""
Misc tools for implementing data structures
Note: pandas.core.common is *not* part of the public API.
"""
import collections
from collections import abc
from datetime import datetime, timedelta
from functools import partial
import inspect
from typing import Any, Collection, Iterable, Union
import numpy as np
from pandas._libs import lib, tslibs
from pandas._typing import T
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa
class SettingWithCopyError(ValueError):
    """Error raised for a disallowed chained-assignment write (SettingWithCopy)."""
class SettingWithCopyWarning(Warning):
    """Warning counterpart of SettingWithCopyError for chained-assignment writes."""
def flatten(l):
    """
    Flatten an arbitrarily nested sequence.

    Parameters
    ----------
    l : sequence
        The non string sequence to flatten

    Notes
    -----
    This doesn't consider strings sequences.

    Returns
    -------
    flattened : generator
    """
    for el in l:
        if _iterable_not_string(el):
            # delegate to the nested generator instead of re-yielding manually
            yield from flatten(el)
        else:
            yield el
def consensus_name_attr(objs):
    """
    Return the ``name`` shared by all objects in ``objs``, or None.

    The first object's name is the candidate; any differing name — or a
    comparison that raises ValueError (e.g. array-valued names) — collapses
    the result to None.
    """
    result = objs[0].name
    for obj in objs[1:]:
        try:
            if obj.name != result:
                result = None
        except ValueError:
            result = None
    return result
def maybe_box(indexer, values, obj, key):
    """
    Box multi-valued lookups: when ``values`` came back as an ndarray,
    re-index ``obj`` at the key's location so the container does the boxing;
    otherwise return ``values`` unchanged.
    """
    if not isinstance(values, np.ndarray):
        # already a scalar/boxed value — nothing to do
        return values
    return obj[indexer.get_loc(key)]
def maybe_box_datetimelike(value):
    """Wrap raw datetime-likes in Timestamp/Timedelta; pass anything else through."""
    if isinstance(value, (np.datetime64, datetime)):
        return tslibs.Timestamp(value)
    if isinstance(value, (np.timedelta64, timedelta)):
        return tslibs.Timedelta(value)
    return value
# C-level helper that extracts the underlying ndarray from a pandas object
values_from_object = lib.values_from_object
def is_bool_indexer(key: Any) -> bool:
    """
    Check whether `key` is a valid boolean indexer.
    Parameters
    ----------
    key : Any
        Only list-likes may be considered boolean indexers.
        All other types are not considered a boolean indexer.
        For array-like input, boolean ndarrays or ExtensionArrays
        with ``_is_boolean`` set are considered boolean indexers.
    Returns
    -------
    bool
    Raises
    ------
    ValueError
        When the array is an object-dtype ndarray or ExtensionArray
        and contains missing values.
    """
    na_msg = "cannot index with vector containing NA / NaN values"
    if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (
        is_array_like(key) and is_extension_array_dtype(key.dtype)
    ):
        # pandas container, ndarray, or extension array
        if key.dtype == np.object_:
            # object dtype: the elements themselves must be inspected
            key = np.asarray(values_from_object(key))
            if not lib.is_bool_array(key):
                # not all-bool: NA-containing arrays are an error, anything
                # else is simply not a boolean indexer
                if isna(key).any():
                    raise ValueError(na_msg)
                return False
            return True
        elif is_bool_dtype(key.dtype):
            # an ndarray with bool-dtype by definition has no missing values.
            # So we only need to check for NAs in ExtensionArrays
            if is_extension_array_dtype(key.dtype):
                if np.any(key.isna()):
                    raise ValueError(na_msg)
            return True
    elif isinstance(key, list):
        # a plain list qualifies only if it coerces to an all-bool ndarray
        # of the same length (no coercion surprises)
        try:
            arr = np.asarray(key)
            return arr.dtype == np.bool_ and len(arr) == len(key)
        except TypeError:  # pragma: no cover
            return False
    return False
def cast_scalar_indexer(val):
    """
    To avoid numpy DeprecationWarnings, cast float to integer where valid.

    Parameters
    ----------
    val : scalar

    Returns
    -------
    outval : scalar
    """
    # assumes lib.is_scalar(val); only integral-valued floats are converted
    if lib.is_float(val):
        as_int = int(val)
        if as_int == val:
            return as_int
    return val
def not_none(*args):
    """Yield only the arguments that are not None."""
    return (x for x in args if x is not None)


def any_none(*args):
    """Return True if at least one argument is None."""
    for x in args:
        if x is None:
            return True
    return False


def all_none(*args):
    """Return True if every argument is None."""
    return not any(x is not None for x in args)


def any_not_none(*args):
    """Return True if at least one argument is not None."""
    return not all(x is None for x in args)


def all_not_none(*args):
    """Return True if no argument is None."""
    return not any(x is None for x in args)


def count_not_none(*args):
    """Return the number of arguments that are not None."""
    return len(args) - sum(x is None for x in args)
def try_sort(iterable):
    """Return a sorted list of ``iterable``'s items, or the unsorted list
    when the items are not mutually comparable."""
    contents = list(iterable)
    try:
        contents = sorted(contents)
    except TypeError:
        pass
    return contents
def asarray_tuplesafe(values, dtype=None):
    """
    Convert ``values`` to an ndarray while keeping tuple elements intact
    (numpy would otherwise expand tuples into extra dimensions).
    """
    if isinstance(values, (list, tuple)) or hasattr(values, "__array__"):
        # Index-likes already hold their values as an ndarray
        if isinstance(values, ABCIndexClass):
            return values.values
    else:
        values = list(values)

    if isinstance(values, list) and dtype in [np.object_, object]:
        return construct_1d_object_array_from_listlike(values)

    result = np.asarray(values, dtype=dtype)
    if issubclass(result.dtype.type, str):
        result = np.asarray(values, dtype=object)
    if result.ndim == 2:
        # Avoid building an array of arrays:
        values = [tuple(x) for x in values]
        result = construct_1d_object_array_from_listlike(values)
    return result
def index_labels_to_array(labels, dtype=None):
    """
    Transform label or iterable of labels to array, for use in Index.

    Parameters
    ----------
    dtype : dtype
        If specified, use as dtype of the resulting array, otherwise infer.

    Returns
    -------
    array
    """
    if isinstance(labels, (str, tuple)):
        # a single scalar-like label becomes a one-element list
        labels = [labels]
    if not isinstance(labels, (list, np.ndarray)):
        try:
            labels = list(labels)
        except TypeError:
            # non-iterable scalar
            labels = [labels]
    return asarray_tuplesafe(labels, dtype=dtype)
def maybe_make_list(obj):
    """Wrap a scalar in a one-element list; None, tuples and lists pass through."""
    if obj is None or isinstance(obj, (tuple, list)):
        return obj
    return [obj]
def maybe_iterable_to_list(obj: Union[Iterable[T], T]) -> Union[Collection[T], T]:
    """
    If obj is Iterable but not list-like, consume into list.
    """
    consume = isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized)
    return list(obj) if consume else obj
def is_null_slice(obj):
    """Return True for a slice with no start, stop, or step (``[:]``)."""
    if not isinstance(obj, slice):
        return False
    return obj.start is None and obj.stop is None and obj.step is None


def is_true_slices(l):
    """
    Find non-trivial slices in "l": return a list of booleans with same length.
    """
    return [isinstance(k, slice) and not is_null_slice(k) for k in l]
# TODO: used only once in indexing; belongs elsewhere?
def is_full_slice(obj, l):
    """Return True for a slice covering exactly ``0:l`` with default step."""
    if not isinstance(obj, slice):
        return False
    return obj.start == 0 and obj.stop == l and obj.step is None
def get_callable_name(obj):
    """
    Best-effort name of a callable: its ``__name__``, the wrapped function of
    a functools.partial (recursively), the class name of a callable instance,
    or None when the argument is not callable at all (None rather than ""
    so "no name" stays distinguishable from an empty name).
    """
    try:
        return obj.__name__
    except AttributeError:
        pass
    if isinstance(obj, partial):
        return get_callable_name(obj.func)
    if hasattr(obj, "__call__"):
        return type(obj).__name__
    return None
def apply_if_callable(maybe_callable, obj, **kwargs):
    """
    Evaluate possibly callable input using obj and kwargs if it is callable,
    otherwise return as it is.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if not callable(maybe_callable):
        return maybe_callable
    return maybe_callable(obj, **kwargs)
def dict_compat(d):
    """
    Helper function to convert datetimelike-keyed dicts
    to Timestamp-keyed dict.

    Parameters
    ----------
    d: dict like object

    Returns
    -------
    dict
    """
    converted = {}
    for key, value in d.items():
        converted[maybe_box_datetimelike(key)] = value
    return converted
def standardize_mapping(into):
    """
    Helper function to standardize a supplied mapping.

    .. versionadded:: 0.21.0

    Parameters
    ----------
    into : instance or subclass of collections.abc.Mapping
        Must be a class, an initialized collections.defaultdict,
        or an instance of a collections.abc.Mapping subclass.

    Returns
    -------
    mapping : a collections.abc.Mapping subclass or other constructor
        a callable object that can accept an iterator to create
        the desired Mapping.

    See Also
    --------
    DataFrame.to_dict
    Series.to_dict
    """
    if inspect.isclass(into):
        klass = into
    elif isinstance(into, collections.defaultdict):
        # keep the default factory by returning a pre-bound constructor
        return partial(collections.defaultdict, into.default_factory)
    else:
        klass = type(into)
    if not issubclass(klass, abc.Mapping):
        raise TypeError(f"unsupported type: {klass}")
    if klass == collections.defaultdict:
        # a bare defaultdict class has no factory to preserve
        raise TypeError("to_dict() only accepts initialized defaultdicts")
    return klass
def random_state(state=None):
    """
    Helper function for processing random_state arguments.

    Parameters
    ----------
    state : int, np.random.RandomState, None.
        If receives an int, passes to np.random.RandomState() as seed.
        If receives an np.random.RandomState object, just returns object.
        If receives `None`, returns np.random.
        If receives anything else, raises an informative ValueError.
        Default None.

    Returns
    -------
    np.random.RandomState
    """
    if isinstance(state, np.random.RandomState):
        return state
    if is_integer(state):
        return np.random.RandomState(state)
    if state is None:
        return np.random
    raise ValueError(
        "random_state must be an integer, a numpy RandomState, or None"
    )
def pipe(obj, func, *args, **kwargs):
    """
    Apply a function ``func`` to object ``obj`` either by passing obj as the
    first argument to the function or, in the case that the func is a tuple,
    interpret the first element of the tuple as a function and pass the obj to
    that function as a keyword argument whose key is the value of the second
    element of the tuple.

    Parameters
    ----------
    func : callable or tuple of (callable, str)
        Function to apply to this object or, alternatively, a
        ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of `callable`` that expects the
        object.
    *args : iterable, optional
        Positional arguments passed into ``func``.
    **kwargs : dict, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.
    """
    if not isinstance(func, tuple):
        return func(obj, *args, **kwargs)
    callee, target = func
    if target in kwargs:
        # the keyword slot reserved for obj is already occupied
        raise ValueError(f"{target} is both the pipe target and a keyword argument")
    kwargs[target] = obj
    return callee(*args, **kwargs)
def get_rename_function(mapper):
    """
    Returns a function that will map names/labels, dependent if mapper
    is a dict, Series or just a function.
    """
    if not isinstance(mapper, (abc.Mapping, ABCSeries)):
        # already callable (or at least not a mapping): use as-is
        return mapper

    def rename(x):
        # labels missing from the mapping map to themselves
        return mapper[x] if x in mapper else x

    return rename
| [
"pandas.core.dtypes.common.is_array_like",
"functools.partial",
"pandas.core.dtypes.inference._iterable_not_string",
"pandas._libs.lib.is_bool_array",
"pandas._libs.lib.is_float",
"pandas._libs.tslibs.Timedelta",
"pandas.core.dtypes.common.is_extension_array_dtype",
"inspect.isclass",
"numpy.asarray... | [((5495, 5526), 'numpy.asarray', 'np.asarray', (['values'], {'dtype': 'dtype'}), '(values, dtype=dtype)\n', (5505, 5526), True, 'import numpy as np\n'), ((10265, 10282), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['state'], {}), '(state)\n', (10275, 10282), False, 'from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer\n'), ((1207, 1231), 'pandas.core.dtypes.inference._iterable_not_string', '_iterable_not_string', (['el'], {}), '(el)\n', (1227, 1231), False, 'from pandas.core.dtypes.inference import _iterable_not_string\n'), ((1934, 1957), 'pandas._libs.tslibs.Timestamp', 'tslibs.Timestamp', (['value'], {}), '(value)\n', (1950, 1957), False, 'from pandas._libs import lib, tslibs\n'), ((4009, 4026), 'pandas._libs.lib.is_float', 'lib.is_float', (['val'], {}), '(val)\n', (4021, 4026), False, 'from pandas._libs import lib, tslibs\n'), ((5433, 5480), 'pandas.core.dtypes.cast.construct_1d_object_array_from_listlike', 'construct_1d_object_array_from_listlike', (['values'], {}), '(values)\n', (5472, 5480), False, 'from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike\n'), ((5588, 5620), 'numpy.asarray', 'np.asarray', (['values'], {'dtype': 'object'}), '(values, dtype=object)\n', (5598, 5620), True, 'import numpy as np\n'), ((5753, 5800), 'pandas.core.dtypes.cast.construct_1d_object_array_from_listlike', 'construct_1d_object_array_from_listlike', (['values'], {}), '(values)\n', (5792, 5800), False, 'from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike\n'), ((9334, 9355), 'inspect.isclass', 'inspect.isclass', (['into'], {}), '(into)\n', (9349, 9355), False, 'import inspect\n'), ((10299, 10327), 'numpy.random.RandomState', 'np.random.RandomState', (['state'], {}), '(state)\n', (10320, 10327), True, 'import numpy as np\n'), ((2031, 2054), 'pandas._libs.tslibs.Timedelta', 'tslibs.Timedelta', (['value'], {}), '(value)\n', (2047, 2054), False, 'from 
pandas._libs import lib, tslibs\n'), ((2842, 2860), 'pandas.core.dtypes.common.is_array_like', 'is_array_like', (['key'], {}), '(key)\n', (2855, 2860), False, 'from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer\n'), ((2865, 2900), 'pandas.core.dtypes.common.is_extension_array_dtype', 'is_extension_array_dtype', (['key.dtype'], {}), '(key.dtype)\n', (2889, 2900), False, 'from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer\n'), ((3189, 3213), 'pandas.core.dtypes.common.is_bool_dtype', 'is_bool_dtype', (['key.dtype'], {}), '(key.dtype)\n', (3202, 3213), False, 'from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer\n'), ((9430, 9484), 'functools.partial', 'partial', (['collections.defaultdict', 'into.default_factory'], {}), '(collections.defaultdict, into.default_factory)\n', (9437, 9484), False, 'from functools import partial\n'), ((3018, 3040), 'pandas._libs.lib.is_bool_array', 'lib.is_bool_array', (['key'], {}), '(key)\n', (3035, 3040), False, 'from pandas._libs import lib, tslibs\n'), ((3374, 3409), 'pandas.core.dtypes.common.is_extension_array_dtype', 'is_extension_array_dtype', (['key.dtype'], {}), '(key.dtype)\n', (3398, 3409), False, 'from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_extension_array_dtype, is_integer\n'), ((3582, 3597), 'numpy.asarray', 'np.asarray', (['key'], {}), '(key)\n', (3592, 3597), True, 'import numpy as np\n'), ((3061, 3070), 'pandas.core.dtypes.missing.isna', 'isna', (['key'], {}), '(key)\n', (3065, 3070), False, 'from pandas.core.dtypes.missing import isna, isnull, notnull\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import sys
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
# The fluid layer APIs used below require static-graph mode on Paddle >= 2.0
paddle.enable_static()
# Use sparse gradient updates for the embedding tables
IS_SPARSE = True
# NOTE(review): USE_GPU is defined but not referenced in the visible code
USE_GPU = False
BATCH_SIZE = 256
def get_usr_combined_features():
    """Build the user-side feature tower: embeddings for user id, gender,
    age bucket and job id, each projected through an FC layer, concatenated
    and compressed into a 200-d tanh feature vector."""
    # FIXME(dzh): the old integer_value(10) API may have had a range check;
    # currently there is no user-configured check.
    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
    uid = layers.data(name='user_id', shape=[1], dtype='int64')
    usr_emb = layers.embedding(input=uid,
                               dtype='float32',
                               size=[USR_DICT_SIZE, 32],
                               param_attr='user_table',
                               is_sparse=IS_SPARSE)
    usr_fc = layers.fc(input=usr_emb, size=32)
    # gender is binary, so a 2-row embedding table suffices
    USR_GENDER_DICT_SIZE = 2
    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
    usr_gender_emb = layers.embedding(input=usr_gender_id,
                                      size=[USR_GENDER_DICT_SIZE, 16],
                                      param_attr='gender_table',
                                      is_sparse=IS_SPARSE)
    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
    # age is bucketed via the MovieLens age table
    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
    usr_age_emb = layers.embedding(input=usr_age_id,
                                   size=[USR_AGE_DICT_SIZE, 16],
                                   is_sparse=IS_SPARSE,
                                   param_attr='age_table')
    usr_age_fc = layers.fc(input=usr_age_emb, size=16)
    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
    usr_job_emb = layers.embedding(input=usr_job_id,
                                   size=[USR_JOB_DICT_SIZE, 16],
                                   param_attr='job_table',
                                   is_sparse=IS_SPARSE)
    usr_job_fc = layers.fc(input=usr_job_emb, size=16)
    # fuse all user features into a single 200-d representation
    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
    return usr_combined_features
def get_mov_combined_features():
    """Assemble the movie-side feature vector of the recommender model.

    The movie id is embedded and projected; category ids and title tokens
    are variable-length sequences (lod_level=1) reduced by sum pooling and
    a text-CNN respectively.  All three are concatenated and fused by a
    tanh FC into a 200-d vector.
    """
    movie_dict_size = paddle.dataset.movielens.max_movie_id() + 1
    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
    id_emb = layers.embedding(input=mov_id,
                              dtype='float32',
                              size=[movie_dict_size, 32],
                              param_attr='movie_table',
                              is_sparse=IS_SPARSE)
    id_fc = layers.fc(input=id_emb, size=32)

    # Category ids form a variable-length sequence; sum-pool the embedded
    # sequence into one fixed-size vector per movie.
    category_dict_size = len(paddle.dataset.movielens.movie_categories())
    category_id = layers.data(name='category_id',
                              shape=[1],
                              dtype='int64',
                              lod_level=1)
    category_emb = layers.embedding(input=category_id,
                                     size=[category_dict_size, 32],
                                     is_sparse=IS_SPARSE)
    category_pooled = layers.sequence_pool(input=category_emb,
                                           pool_type="sum")

    # Title tokens: embedding followed by a sequence conv + sum pooling.
    title_dict_size = len(paddle.dataset.movielens.get_movie_title_dict())
    title_id = layers.data(name='movie_title',
                           shape=[1],
                           dtype='int64',
                           lod_level=1)
    title_emb = layers.embedding(input=title_id,
                                 size=[title_dict_size, 32],
                                 is_sparse=IS_SPARSE)
    title_conv = nets.sequence_conv_pool(input=title_emb,
                                            num_filters=32,
                                            filter_size=3,
                                            act="tanh",
                                            pool_type="sum")

    # FIXME(dzh) : need tanh operator
    merged = layers.concat(
        input=[id_fc, category_pooled, title_conv], axis=1)
    return layers.fc(input=merged, size=200, act="tanh")
def model():
    """Build the two-tower rating model.

    Returns:
        (scale_infer, avg_cost): the predicted rating (cosine similarity of
        the user and movie towers scaled to the 0-5 range) and the mean
        squared error against the ground-truth 'score' input.
    """
    user_vec = get_usr_combined_features()
    movie_vec = get_mov_combined_features()
    # Cosine similarity in [-1, 1], scaled toward the 5-star rating range.
    similarity = layers.cos_sim(X=user_vec, Y=movie_vec)
    scale_infer = layers.scale(x=similarity, scale=5.0)
    label = layers.data(name='score', shape=[1], dtype='float32')
    avg_cost = layers.mean(
        layers.square_error_cost(input=scale_infer, label=label))
    return scale_infer, avg_cost
def train(use_cuda, save_dirname, is_local=True):
    """Train the recommender with SGD and save an inference model.

    Training stops early as soon as the sampled test loss drops below 6.0,
    at which point the inference program is saved to `save_dirname`.

    Args:
        use_cuda: run on GPU 0 when True, otherwise on CPU.
        save_dirname: directory for fluid.io.save_inference_model output
            (skipped when None).
        is_local: when False, run in distributed mode, with the role and
            endpoints taken from the PADDLE_* environment variables.
    """
    scale_infer, avg_cost = model()
    # test program: clone of the main program with training-only ops removed
    test_program = fluid.default_main_program().clone(for_test=True)
    sgd_optimizer = SGDOptimizer(learning_rate=0.2)
    sgd_optimizer.minimize(avg_cost)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = Executor(place)
    train_reader = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.movielens.train(), buf_size=8192),
                                batch_size=BATCH_SIZE)
    test_reader = paddle.batch(paddle.dataset.movielens.test(),
                               batch_size=BATCH_SIZE)
    # Names (and order) of the data columns fed into the program each step.
    feed_order = [
        'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
        'movie_title', 'score'
    ]
    def train_loop(main_program):
        # Runs up to PASS_NUM epochs; returns early once the model is good.
        exe.run(framework.default_startup_program())
        feed_list = [
            main_program.global_block().var(var_name) for var_name in feed_order
        ]
        feeder = fluid.DataFeeder(feed_list, place)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for batch_id, data in enumerate(train_reader()):
                # train a mini-batch
                outs = exe.run(program=main_program,
                               feed=feeder.feed(data),
                               fetch_list=[avg_cost])
                out = np.array(outs[0])
                # Every 10 batches, estimate the test loss on one batch only.
                if (batch_id + 1) % 10 == 0:
                    avg_cost_set = []
                    for test_data in test_reader():
                        avg_cost_np = exe.run(program=test_program,
                                              feed=feeder.feed(test_data),
                                              fetch_list=[avg_cost])
                        avg_cost_set.append(avg_cost_np[0])
                        break  # test only 1 segment for speeding up CI
                    # get test avg_cost
                    test_avg_cost = np.array(avg_cost_set).mean()
                    if test_avg_cost < 6.0:
                        # if avg_cost less than 6.0, we think our code is good.
                        if save_dirname is not None:
                            fluid.io.save_inference_model(
                                save_dirname, [
                                    "user_id", "gender_id", "age_id", "job_id",
                                    "movie_id", "category_id", "movie_title"
                                ], [scale_infer], exe)
                        return
                if math.isnan(float(out[0])):
                    sys.exit("got NaN loss, training failed.")
    if is_local:
        train_loop(fluid.default_main_program())
    else:
        # Distributed mode: transpile the program into parameter-server and
        # trainer variants based on the PADDLE_* environment configuration.
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())
def infer(use_cuda, save_dirname=None):
    """Load the saved inference model and score one hand-built example.

    Args:
        use_cuda: run on GPU 0 when True, otherwise on CPU.
        save_dirname: directory produced by train(); a no-op when None.
    """
    if save_dirname is None:
        return
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program
        # desc, the feed_target_names (the names of variables that will be
        # fed data using feed operators), and the fetch_targets (variables
        # that we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # The saved model must expect its inputs in exactly this order.
        expected_order = ('user_id', 'gender_id', 'age_id', 'job_id',
                          'movie_id', 'category_id', 'movie_title')
        for idx, expected_name in enumerate(expected_order):
            assert feed_target_names[idx] == expected_name

        def scalar_lod(value):
            # A single int64 id wrapped as one length-1 LoD sequence.
            return fluid.create_lod_tensor([[np.int64(value)]], [[1]], place)

        # create_lod_tensor(data, recursive_sequence_lengths, place) builds
        # a LoD tensor: `data` is a list of index sequences and the lengths
        # list gives the length-based level-of-detail info.  Scalar ids use
        # length-1 sequences; categories and title are genuine sequences of
        # length 3 and 5 respectively.
        feed_values = [
            scalar_lod(1),    # user_id
            scalar_lod(1),    # gender_id
            scalar_lod(0),    # age_id
            scalar_lod(10),   # job_id
            scalar_lod(783),  # movie_id
            fluid.create_lod_tensor(
                [np.array([10, 8, 9], dtype='int64')], [[3]], place),
            fluid.create_lod_tensor(
                [np.array([1069, 4140, 2923, 710, 988], dtype='int64')],
                [[5]], place),
        ]

        # Feed is a {feed_target_name: feed_target_data} dictionary; the
        # run returns one entry per fetch target.
        results = exe.run(inference_program,
                          feed=dict(zip(feed_target_names, feed_values)),
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print("inferred score: ", np.array(results[0]))
def main(use_cuda):
    """Train the recommender end to end, then run inference on it.

    Silently skips the run when a GPU is requested but this Paddle build
    has no CUDA support.
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    # Inference artifacts are written here by train() and read by infer().
    model_path = "recommender_system.inference.model"
    for step in (train, infer):
        step(use_cuda, model_path)
if __name__ == '__main__':
    # Script entry point.  NOTE(review): the call below lost its
    # indentation (SyntaxError as written); restored under the guard.
    main(USE_GPU)
| [
"paddle.fluid.io.save_inference_model",
"paddle.enable_static",
"paddle.fluid.layers.data",
"paddle.fluid.layers.embedding",
"paddle.fluid.framework.default_startup_program",
"paddle.fluid.executor.Executor",
"paddle.dataset.movielens.get_movie_title_dict",
"paddle.fluid.layers.fc",
"paddle.fluid.la... | [((952, 974), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (972, 974), False, 'import paddle\n'), ((1257, 1310), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""user_id"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='user_id', shape=[1], dtype='int64')\n", (1268, 1310), True, 'import paddle.fluid.layers as layers\n'), ((1326, 1446), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'uid', 'dtype': '"""float32"""', 'size': '[USR_DICT_SIZE, 32]', 'param_attr': '"""user_table"""', 'is_sparse': 'IS_SPARSE'}), "(input=uid, dtype='float32', size=[USR_DICT_SIZE, 32],\n param_attr='user_table', is_sparse=IS_SPARSE)\n", (1342, 1446), True, 'import paddle.fluid.layers as layers\n'), ((1581, 1614), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'usr_emb', 'size': '(32)'}), '(input=usr_emb, size=32)\n', (1590, 1614), True, 'import paddle.fluid.layers as layers\n'), ((1666, 1721), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""gender_id"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='gender_id', shape=[1], dtype='int64')\n", (1677, 1721), True, 'import paddle.fluid.layers as layers\n'), ((1744, 1866), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'usr_gender_id', 'size': '[USR_GENDER_DICT_SIZE, 16]', 'param_attr': '"""gender_table"""', 'is_sparse': 'IS_SPARSE'}), "(input=usr_gender_id, size=[USR_GENDER_DICT_SIZE, 16],\n param_attr='gender_table', is_sparse=IS_SPARSE)\n", (1760, 1866), True, 'import paddle.fluid.layers as layers\n'), ((1998, 2038), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'usr_gender_emb', 'size': '(16)'}), '(input=usr_gender_emb, size=16)\n', (2007, 2038), True, 'import paddle.fluid.layers as layers\n'), ((2121, 2173), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""age_id"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='age_id', shape=[1], dtype='int64')\n", (2132, 2173), True, 'import 
paddle.fluid.layers as layers\n'), ((2193, 2307), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'usr_age_id', 'size': '[USR_AGE_DICT_SIZE, 16]', 'is_sparse': 'IS_SPARSE', 'param_attr': '"""age_table"""'}), "(input=usr_age_id, size=[USR_AGE_DICT_SIZE, 16], is_sparse=\n IS_SPARSE, param_attr='age_table')\n", (2209, 2307), True, 'import paddle.fluid.layers as layers\n'), ((2426, 2463), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'usr_age_emb', 'size': '(16)'}), '(input=usr_age_emb, size=16)\n', (2435, 2463), True, 'import paddle.fluid.layers as layers\n'), ((2548, 2600), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""job_id"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='job_id', shape=[1], dtype='int64')\n", (2559, 2600), True, 'import paddle.fluid.layers as layers\n'), ((2620, 2734), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'usr_job_id', 'size': '[USR_JOB_DICT_SIZE, 16]', 'param_attr': '"""job_table"""', 'is_sparse': 'IS_SPARSE'}), "(input=usr_job_id, size=[USR_JOB_DICT_SIZE, 16], param_attr\n ='job_table', is_sparse=IS_SPARSE)\n", (2636, 2734), True, 'import paddle.fluid.layers as layers\n'), ((2853, 2890), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'usr_job_emb', 'size': '(16)'}), '(input=usr_job_emb, size=16)\n', (2862, 2890), True, 'import paddle.fluid.layers as layers\n'), ((2911, 2987), 'paddle.fluid.layers.concat', 'layers.concat', ([], {'input': '[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc]', 'axis': '(1)'}), '(input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)\n', (2924, 2987), True, 'import paddle.fluid.layers as layers\n'), ((3026, 3077), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'concat_embed', 'size': '(200)', 'act': '"""tanh"""'}), "(input=concat_embed, size=200, act='tanh')\n", (3035, 3077), True, 'import paddle.fluid.layers as layers\n'), ((3226, 3280), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""movie_id"""', 
'shape': '[1]', 'dtype': '"""int64"""'}), "(name='movie_id', shape=[1], dtype='int64')\n", (3237, 3280), True, 'import paddle.fluid.layers as layers\n'), ((3296, 3420), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'mov_id', 'dtype': '"""float32"""', 'size': '[MOV_DICT_SIZE, 32]', 'param_attr': '"""movie_table"""', 'is_sparse': 'IS_SPARSE'}), "(input=mov_id, dtype='float32', size=[MOV_DICT_SIZE, 32],\n param_attr='movie_table', is_sparse=IS_SPARSE)\n", (3312, 3420), True, 'import paddle.fluid.layers as layers\n'), ((3555, 3588), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'mov_emb', 'size': '(32)'}), '(input=mov_emb, size=32)\n', (3564, 3588), True, 'import paddle.fluid.layers as layers\n'), ((3683, 3753), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""category_id"""', 'shape': '[1]', 'dtype': '"""int64"""', 'lod_level': '(1)'}), "(name='category_id', shape=[1], dtype='int64', lod_level=1)\n", (3694, 3753), True, 'import paddle.fluid.layers as layers\n'), ((3870, 3961), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'category_id', 'size': '[CATEGORY_DICT_SIZE, 32]', 'is_sparse': 'IS_SPARSE'}), '(input=category_id, size=[CATEGORY_DICT_SIZE, 32],\n is_sparse=IS_SPARSE)\n', (3886, 3961), True, 'import paddle.fluid.layers as layers\n'), ((4071, 4134), 'paddle.fluid.layers.sequence_pool', 'layers.sequence_pool', ([], {'input': 'mov_categories_emb', 'pool_type': '"""sum"""'}), "(input=mov_categories_emb, pool_type='sum')\n", (4091, 4134), True, 'import paddle.fluid.layers as layers\n'), ((4284, 4354), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""movie_title"""', 'shape': '[1]', 'dtype': '"""int64"""', 'lod_level': '(1)'}), "(name='movie_title', shape=[1], dtype='int64', lod_level=1)\n", (4295, 4354), True, 'import paddle.fluid.layers as layers\n'), ((4469, 4562), 'paddle.fluid.layers.embedding', 'layers.embedding', ([], {'input': 'mov_title_id', 'size': '[MOV_TITLE_DICT_SIZE, 32]', 
'is_sparse': 'IS_SPARSE'}), '(input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32],\n is_sparse=IS_SPARSE)\n', (4485, 4562), True, 'import paddle.fluid.layers as layers\n'), ((4655, 4763), 'paddle.fluid.nets.sequence_conv_pool', 'nets.sequence_conv_pool', ([], {'input': 'mov_title_emb', 'num_filters': '(32)', 'filter_size': '(3)', 'act': '"""tanh"""', 'pool_type': '"""sum"""'}), "(input=mov_title_emb, num_filters=32, filter_size=3,\n act='tanh', pool_type='sum')\n", (4678, 4763), True, 'import paddle.fluid.nets as nets\n'), ((4960, 5036), 'paddle.fluid.layers.concat', 'layers.concat', ([], {'input': '[mov_fc, mov_categories_hidden, mov_title_conv]', 'axis': '(1)'}), '(input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)\n', (4973, 5036), True, 'import paddle.fluid.layers as layers\n'), ((5113, 5164), 'paddle.fluid.layers.fc', 'layers.fc', ([], {'input': 'concat_embed', 'size': '(200)', 'act': '"""tanh"""'}), "(input=concat_embed, size=200, act='tanh')\n", (5122, 5164), True, 'import paddle.fluid.layers as layers\n'), ((5362, 5426), 'paddle.fluid.layers.cos_sim', 'layers.cos_sim', ([], {'X': 'usr_combined_features', 'Y': 'mov_combined_features'}), '(X=usr_combined_features, Y=mov_combined_features)\n', (5376, 5426), True, 'import paddle.fluid.layers as layers\n'), ((5445, 5481), 'paddle.fluid.layers.scale', 'layers.scale', ([], {'x': 'inference', 'scale': '(5.0)'}), '(x=inference, scale=5.0)\n', (5457, 5481), True, 'import paddle.fluid.layers as layers\n'), ((5495, 5548), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""score"""', 'shape': '[1]', 'dtype': '"""float32"""'}), "(name='score', shape=[1], dtype='float32')\n", (5506, 5548), True, 'import paddle.fluid.layers as layers\n'), ((5567, 5623), 'paddle.fluid.layers.square_error_cost', 'layers.square_error_cost', ([], {'input': 'scale_infer', 'label': 'label'}), '(input=scale_infer, label=label)\n', (5591, 5623), True, 'import paddle.fluid.layers as layers\n'), ((5639, 5663), 
'paddle.fluid.layers.mean', 'layers.mean', (['square_cost'], {}), '(square_cost)\n', (5650, 5663), True, 'import paddle.fluid.layers as layers\n'), ((5896, 5927), 'paddle.fluid.optimizer.SGDOptimizer', 'SGDOptimizer', ([], {'learning_rate': '(0.2)'}), '(learning_rate=0.2)\n', (5908, 5927), False, 'from paddle.fluid.optimizer import SGDOptimizer\n'), ((6042, 6057), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (6050, 6057), False, 'from paddle.fluid.executor import Executor\n'), ((9621, 9642), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (9635, 9642), True, 'import paddle.fluid as fluid\n'), ((9666, 9684), 'paddle.fluid.core.Scope', 'fluid.core.Scope', ([], {}), '()\n', (9682, 9684), True, 'import paddle.fluid as fluid\n'), ((1203, 1241), 'paddle.dataset.movielens.max_user_id', 'paddle.dataset.movielens.max_user_id', ([], {}), '()\n', (1239, 1241), False, 'import paddle\n'), ((2489, 2526), 'paddle.dataset.movielens.max_job_id', 'paddle.dataset.movielens.max_job_id', ([], {}), '()\n', (2524, 2526), False, 'import paddle\n'), ((3168, 3207), 'paddle.dataset.movielens.max_movie_id', 'paddle.dataset.movielens.max_movie_id', ([], {}), '()\n', (3205, 3207), False, 'import paddle\n'), ((3619, 3662), 'paddle.dataset.movielens.movie_categories', 'paddle.dataset.movielens.movie_categories', ([], {}), '()\n', (3660, 3662), False, 'import paddle\n'), ((4215, 4262), 'paddle.dataset.movielens.get_movie_title_dict', 'paddle.dataset.movielens.get_movie_title_dict', ([], {}), '()\n', (4260, 4262), False, 'import paddle\n'), ((5978, 5996), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (5993, 5996), True, 'import paddle.fluid as fluid\n'), ((6014, 6030), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (6028, 6030), True, 'import paddle.fluid as fluid\n'), ((6258, 6289), 'paddle.dataset.movielens.test', 'paddle.dataset.movielens.test', ([], {}), '()\n', (6287, 6289), False, 'import paddle\n'), 
((6700, 6734), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', (['feed_list', 'place'], {}), '(feed_list, place)\n', (6716, 6734), True, 'import paddle.fluid as fluid\n'), ((8414, 8454), 'os.getenv', 'os.getenv', (['"""PADDLE_PSERVER_PORT"""', '"""6174"""'], {}), "('PADDLE_PSERVER_PORT', '6174')\n", (8423, 8454), False, 'import os\n'), ((8477, 8508), 'os.getenv', 'os.getenv', (['"""PADDLE_PSERVER_IPS"""'], {}), "('PADDLE_PSERVER_IPS')\n", (8486, 8508), False, 'import os\n'), ((8892, 8936), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINING_ROLE"""', '"""TRAINER"""'], {}), "('PADDLE_TRAINING_ROLE', 'TRAINER')\n", (8901, 8936), False, 'import os\n'), ((8949, 8977), 'paddle.fluid.DistributeTranspiler', 'fluid.DistributeTranspiler', ([], {}), '()\n', (8975, 8977), True, 'import paddle.fluid as fluid\n'), ((9558, 9576), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (9573, 9576), True, 'import paddle.fluid as fluid\n'), ((9594, 9610), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (9608, 9610), True, 'import paddle.fluid as fluid\n'), ((9694, 9728), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['inference_scope'], {}), '(inference_scope)\n', (9711, 9728), True, 'import paddle.fluid as fluid\n'), ((10096, 10144), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', (['save_dirname', 'exe'], {}), '(save_dirname, exe)\n', (10125, 10144), True, 'import paddle.fluid as fluid\n'), ((5825, 5853), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (5851, 5853), True, 'import paddle.fluid as fluid\n'), ((6122, 6154), 'paddle.dataset.movielens.train', 'paddle.dataset.movielens.train', ([], {}), '()\n', (6152, 6154), False, 'import paddle\n'), ((6532, 6567), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (6565, 6567), True, 'import paddle.fluid.framework as framework\n'), ((8359, 8387), 'paddle.fluid.default_main_program', 
'fluid.default_main_program', ([], {}), '()\n', (8385, 8387), True, 'import paddle.fluid as fluid\n'), ((8721, 8749), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINERS"""'], {}), "('PADDLE_TRAINERS')\n", (8730, 8749), False, 'import os\n'), ((8836, 8866), 'os.getenv', 'os.getenv', (['"""PADDLE_TRAINER_ID"""'], {}), "('PADDLE_TRAINER_ID')\n", (8845, 8866), False, 'import os\n'), ((12669, 12689), 'numpy.array', 'np.array', (['results[0]'], {}), '(results[0])\n', (12677, 12689), True, 'import numpy as np\n'), ((12737, 12771), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (12769, 12771), True, 'import paddle.fluid as fluid\n'), ((7081, 7098), 'numpy.array', 'np.array', (['outs[0]'], {}), '(outs[0])\n', (7089, 7098), True, 'import numpy as np\n'), ((8778, 8797), 'os.getenv', 'os.getenv', (['"""POD_IP"""'], {}), "('POD_IP')\n", (8787, 8797), False, 'import os\n'), ((11583, 11618), 'numpy.array', 'np.array', (['[10, 8, 9]'], {'dtype': '"""int64"""'}), "([10, 8, 9], dtype='int64')\n", (11591, 11618), True, 'import numpy as np\n'), ((11749, 11802), 'numpy.array', 'np.array', (['[1069, 4140, 2923, 710, 988]'], {'dtype': '"""int64"""'}), "([1069, 4140, 2923, 710, 988], dtype='int64')\n", (11757, 11802), True, 'import numpy as np\n'), ((8279, 8321), 'sys.exit', 'sys.exit', (['"""got NaN loss, training failed."""'], {}), "('got NaN loss, training failed.')\n", (8287, 8321), False, 'import sys\n'), ((10943, 10954), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (10951, 10954), True, 'import numpy as np\n'), ((11070, 11081), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (11078, 11081), True, 'import numpy as np\n'), ((11191, 11202), 'numpy.int64', 'np.int64', (['(0)'], {}), '(0)\n', (11199, 11202), True, 'import numpy as np\n'), ((11312, 11324), 'numpy.int64', 'np.int64', (['(10)'], {}), '(10)\n', (11320, 11324), True, 'import numpy as np\n'), ((11438, 11451), 'numpy.int64', 'np.int64', (['(783)'], {}), '(783)\n', (11446, 
11451), True, 'import numpy as np\n'), ((7655, 7677), 'numpy.array', 'np.array', (['avg_cost_set'], {}), '(avg_cost_set)\n', (7663, 7677), True, 'import numpy as np\n'), ((7890, 8050), 'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', (['save_dirname', "['user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',\n 'movie_title']", '[scale_infer]', 'exe'], {}), "(save_dirname, ['user_id', 'gender_id',\n 'age_id', 'job_id', 'movie_id', 'category_id', 'movie_title'], [\n scale_infer], exe)\n", (7919, 8050), True, 'import paddle.fluid as fluid\n')] |
# the code logic is the same as mask_bn.py
# keep as a seperate file to distinguish between PatchGuard and PatchGuard++
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
import nets.bagnet
import nets.resnet
from utils.defense_utils import *
import os
import joblib
import argparse
from tqdm import tqdm
import numpy as np
from scipy.special import softmax
from math import ceil
import PIL
# Command-line configuration for the PatchGuard++ evaluation run.
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir",default='checkpoints',type=str,help="path to checkpoints")
parser.add_argument('--data_dir', default='data', type=str,help="path to data")
parser.add_argument('--dataset', default='imagenette', choices=('imagenette','imagenet','cifar'),type=str,help="dataset")
parser.add_argument("--model",default='bagnet33',type=str,help="model name")
parser.add_argument("--clip",default=-1,type=int,help="clipping value; do clipping when this argument is set to positive")
parser.add_argument("--aggr",default='none',type=str,help="aggregation methods. set to none for local feature")
parser.add_argument("--skip",default=1,type=int,help="number of example to skip")
parser.add_argument("--thres",default=0.0,type=float,help="detection threshold for robust masking")
parser.add_argument("--patch_size",default=-1,type=int,help="size of the adversarial patch")
parser.add_argument("--det",action='store_true',help="use PG++ attack detection")
parser.add_argument("--tau",default=0.0,type=float,help="tau")

args = parser.parse_args()
# Resolve the checkpoint root and the dataset directory from the arguments.
MODEL_DIR=os.path.join('.',args.model_dir)
DATA_DIR=os.path.join(args.data_dir,args.dataset)
DATASET = args.dataset
def get_dataset(ds, data_dir):
    """Build the evaluation dataset and return (dataset, class_names).

    Args:
        ds: dataset name. 'imagenette'/'imagenet' expect an ImageFolder
            layout under <data_dir>/val; 'cifar' downloads the CIFAR-10
            test split and upscales it to 192x192 to match the 192-pixel
            CIFAR checkpoints.
        data_dir: root directory of the dataset.

    Raises:
        ValueError: for an unsupported `ds` (previously this fell through
            and crashed with a confusing NameError on `dataset_`).
    """
    if ds in ['imagenette', 'imagenet']:
        ds_dir = os.path.join(data_dir, 'val')
        ds_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            # standard ImageNet channel statistics
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        dataset_ = datasets.ImageFolder(ds_dir, ds_transforms)
        class_names = dataset_.classes
    elif ds == 'cifar':
        ds_transforms = transforms.Compose([
            # upscale 32x32 CIFAR so BagNet receptive fields stay meaningful
            transforms.Resize(192, interpolation=PIL.Image.BICUBIC),
            transforms.ToTensor(),
            # CIFAR-10 channel statistics
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        dataset_ = datasets.CIFAR10(root=data_dir, train=False, download=True,
                                     transform=ds_transforms)
        class_names = dataset_.classes
    else:
        raise ValueError("unsupported dataset: {}".format(ds))
    return dataset_, class_names
# Build the validation set, keeping only every `args.skip`-th example to
# trade estimate precision for runtime.
val_dataset_,class_names = get_dataset(DATASET,DATA_DIR)
skips = list(range(0, len(val_dataset_), args.skip))
val_dataset = torch.utils.data.Subset(val_dataset_, skips)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=8,shuffle=False)
#build and initialize model
device = 'cuda' #if torch.cuda.is_available() else 'cpu'
# Optional clipping range for the local logits; a negative --clip disables it.
if args.clip > 0:
    clip_range = [0,args.clip]
else:
    clip_range = None
# Pick the BagNet backbone; rf_size is the receptive-field side length of
# one local feature.
if 'bagnet17' in args.model:
    model = nets.bagnet.bagnet17(pretrained=True,clip_range=clip_range,aggregation=args.aggr)
    rf_size=17
elif 'bagnet33' in args.model:
    model = nets.bagnet.bagnet33(pretrained=True,clip_range=clip_range,aggregation=args.aggr)
    rf_size=33
elif 'bagnet9' in args.model:
    model = nets.bagnet.bagnet9(pretrained=True,clip_range=clip_range,aggregation=args.aggr)
    rf_size=9
# Load the dataset-specific checkpoint (note the differing state-dict keys)
# and fall back to the per-dataset default patch size when none was given.
if DATASET == 'imagenette':
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, len(class_names))
    model = torch.nn.DataParallel(model)
    checkpoint = torch.load(os.path.join(MODEL_DIR,args.model+'_nette.pth'))
    model.load_state_dict(checkpoint['model_state_dict'])
    args.patch_size = args.patch_size if args.patch_size>0 else 32
elif DATASET == 'imagenet':
    model = torch.nn.DataParallel(model)
    checkpoint = torch.load(os.path.join(MODEL_DIR,args.model+'_net.pth'))
    model.load_state_dict(checkpoint['state_dict'])
    args.patch_size = args.patch_size if args.patch_size>0 else 32
elif DATASET == 'cifar':
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, len(class_names))
    model = torch.nn.DataParallel(model)
    checkpoint = torch.load(os.path.join(MODEL_DIR,args.model+'_192_cifar.pth'))
    model.load_state_dict(checkpoint['net'])
    args.patch_size = args.patch_size if args.patch_size>0 else 30
# Smallest window (in feature-map cells) that can cover a patch of
# args.patch_size pixels, given the backbone's receptive field and stride.
rf_stride=8
window_size = ceil((args.patch_size + rf_size -1) / rf_stride)
print("window_size",window_size)
model = model.to(device)
model.eval()
cudnn.benchmark = True
accuracy_list=[]
result_list=[]
clean_corr=0
for data,labels in tqdm(val_loader):
    data=data.to(device)
    labels = labels.numpy()
    output_clean = model(data).detach().cpu().numpy() # logits
    #output_clean = softmax(output_clean,axis=-1) # confidence
    #output_clean = (output_clean > 0.2).astype(float) # predictions with confidence threshold
    #note: the provable analysis of robust masking is cpu-intensive and can take some time to finish
    #you can dump the local feature and do the provable analysis with another script so that GPU mempry is not always occupied
    for i in range(len(labels)):
        if args.det:
            local_feature = output_clean[i]
            #result,clean_pred = provable_detection(local_feature,labels[i],tau=args.tau,window_shape=[window_size,window_size])
            #clean_corr += clean_pred
            # Clean prediction with the PG++ detection defense enabled, and
            # the per-example provable-robustness verdict (0/1/2).
            clean_pred = pg2_detection(local_feature,tau=args.tau,window_shape=[window_size,window_size])
            clean_corr += clean_pred == labels[i]
            result = pg2_detection_provable(local_feature,labels[i],tau=args.tau,window_shape=[window_size,window_size])
            result_list.append(result)
    # Undefended accuracy: mean-pool the local logits over the spatial axes.
    acc_clean = np.sum(np.argmax(np.mean(output_clean,axis=(1,2)),axis=1) == labels)
    accuracy_list.append(acc_clean)

# Summarize the per-example verdicts collected above.
cases,cnt=np.unique(result_list,return_counts=True)
print("Provable robust accuracy:",cnt[-1]/len(result_list) if len(cnt)==3 else 0)
print("Clean accuracy with defense:",clean_corr/len(result_list))
print("Clean accuracy without defense:",np.sum(accuracy_list)/len(val_dataset))
print("------------------------------")
print("Provable analysis cases (0: incorrect prediction; 1: vulnerable; 2: provably robust):",cases)
print("Provable analysis breakdown",cnt/len(result_list)) | [
"torch.utils.data.Subset",
"tqdm.tqdm",
"numpy.sum",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"math.ceil",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"torchvision.datasets.ImageFolder",
"torchvision.datasets.CIFAR10",
"numpy.mean",
"torchvision.tran... | [((532, 557), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (555, 557), False, 'import argparse\n'), ((1625, 1658), 'os.path.join', 'os.path.join', (['"""."""', 'args.model_dir'], {}), "('.', args.model_dir)\n", (1637, 1658), False, 'import os\n'), ((1667, 1708), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset'], {}), '(args.data_dir, args.dataset)\n', (1679, 1708), False, 'import os\n'), ((2782, 2826), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['val_dataset_', 'skips'], {}), '(val_dataset_, skips)\n', (2805, 2826), False, 'import torch\n'), ((2840, 2909), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': '(8)', 'shuffle': '(False)'}), '(val_dataset, batch_size=8, shuffle=False)\n', (2867, 2909), False, 'import torch\n'), ((4499, 4548), 'math.ceil', 'ceil', (['((args.patch_size + rf_size - 1) / rf_stride)'], {}), '((args.patch_size + rf_size - 1) / rf_stride)\n', (4503, 4548), False, 'from math import ceil\n'), ((4714, 4730), 'tqdm.tqdm', 'tqdm', (['val_loader'], {}), '(val_loader)\n', (4718, 4730), False, 'from tqdm import tqdm\n'), ((5972, 6014), 'numpy.unique', 'np.unique', (['result_list'], {'return_counts': '(True)'}), '(result_list, return_counts=True)\n', (5981, 6014), True, 'import numpy as np\n'), ((3620, 3648), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3641, 3648), False, 'import torch\n'), ((1816, 1845), 'os.path.join', 'os.path.join', (['data_dir', '"""val"""'], {}), "(data_dir, 'val')\n", (1828, 1845), False, 'import os\n'), ((2130, 2173), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['ds_dir', 'ds_transforms'], {}), '(ds_dir, ds_transforms)\n', (2150, 2173), False, 'from torchvision import datasets, transforms\n'), ((3677, 3727), 'os.path.join', 'os.path.join', (['MODEL_DIR', "(args.model + '_nette.pth')"], {}), "(MODEL_DIR, args.model + '_nette.pth')\n", (3689, 3727), False, 
'import os\n'), ((3898, 3926), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3919, 3926), False, 'import torch\n'), ((6202, 6223), 'numpy.sum', 'np.sum', (['accuracy_list'], {}), '(accuracy_list)\n', (6208, 6223), True, 'import numpy as np\n'), ((2501, 2590), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': 'data_dir', 'train': '(False)', 'download': '(True)', 'transform': 'ds_transforms'}), '(root=data_dir, train=False, download=True, transform=\n ds_transforms)\n', (2517, 2590), False, 'from torchvision import datasets, transforms\n'), ((3955, 4003), 'os.path.join', 'os.path.join', (['MODEL_DIR', "(args.model + '_net.pth')"], {}), "(MODEL_DIR, args.model + '_net.pth')\n", (3967, 4003), False, 'import os\n'), ((4249, 4277), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4270, 4277), False, 'import torch\n'), ((1906, 1928), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1923, 1928), False, 'from torchvision import datasets, transforms\n'), ((1946, 1972), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1967, 1972), False, 'from torchvision import datasets, transforms\n'), ((1990, 2011), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2009, 2011), False, 'from torchvision import datasets, transforms\n'), ((2029, 2095), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2049, 2095), False, 'from torchvision import datasets, transforms\n'), ((4306, 4360), 'os.path.join', 'os.path.join', (['MODEL_DIR', "(args.model + '_192_cifar.pth')"], {}), "(MODEL_DIR, args.model + '_192_cifar.pth')\n", (4318, 4360), False, 'import os\n'), ((5872, 5906), 'numpy.mean', 'np.mean', (['output_clean'], {'axis': '(1, 2)'}), '(output_clean, axis=(1, 2))\n', (5879, 5906), True, 
'import numpy as np\n'), ((2293, 2348), 'torchvision.transforms.Resize', 'transforms.Resize', (['(192)'], {'interpolation': 'PIL.Image.BICUBIC'}), '(192, interpolation=PIL.Image.BICUBIC)\n', (2310, 2348), False, 'from torchvision import datasets, transforms\n'), ((2362, 2383), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2381, 2383), False, 'from torchvision import datasets, transforms\n'), ((2397, 2468), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (2417, 2468), False, 'from torchvision import datasets, transforms\n')] |
# ============================================================================
# Copyright 2021 The AIMM team at Shenzhen Bay Laboratory & Peking University
#
# People: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This code is a part of Cybertron-Code package.
#
# The Cybertron-Code is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""models"""
import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.common.initializer import Normal
from .units import units
from .blocks import Dense, Residual
from .interactions import SchNetInteraction
from .interactions import PhysNetModule
from .interactions import NeuralInteractionUnit
from .base import ResFilter, GraphNorm
from .cutoff import get_cutoff
from .rbf import GaussianSmearing, LogGaussianDistribution
from .activations import ShiftedSoftplus, Swish
__all__ = [
"DeepGraphMolecularModel",
"SchNet",
"PhysNet",
"MolCT",
]
class DeepGraphMolecularModel(nn.Cell):
    r"""Basic class for graph neural network (GNN) based deep molecular model

    Args:
        num_elements (int): maximum number of atomic types
        min_rbf_dis (float): minimum distance used by the RBF expansion
        max_rbf_dis (float): maximum distance used by the RBF expansion
        num_rbf (int): number of the serial of radical basis functions (RBF)
        dim_feature (int): dimension of the vectors for atomic embedding
        n_interactions (int): number of interaction layers (ignored when
            ``interactions`` is given as a list)
        interactions (list, optional): explicit list of interaction type names
        unit_length (str): length unit used to interpret distances
        activation (nn.Cell, optional): activation function
        rbf_sigma (float, optional): width parameter forwarded to the RBF
        trainable_rbf (bool): whether RBF parameters are trainable
        rbf_function (nn.Cell, optional): the algorithm to calculate RBF
        cutoff (float, optional): cutoff distance (defaults to ``max_rbf_dis``)
        cutoff_network (nn.Cell, optional): the algorithm to calculate cutoff
        rescale_rbf (bool): if True, rescale RBF values from [0, 1] to [-1, 1]
        use_distances (bool): use interatomic distances as input features
        use_public_filter (bool): share one distance filter over all layers
        use_graph_norm (bool): apply graph normalization after each layer
        dis_filter (nn.Cell, optional): filter applied to the RBF output
    """
    def __init__(
            self,
            num_elements,
            min_rbf_dis,
            max_rbf_dis,
            num_rbf,
            dim_feature,
            n_interactions,
            interactions=None,
            unit_length='nm',
            activation=None,
            rbf_sigma=None,
            trainable_rbf=False,
            rbf_function=None,
            cutoff=None,
            cutoff_network=None,
            rescale_rbf=False,
            use_distances=True,
            use_public_filter=False,
            use_graph_norm=False,
            dis_filter=None
    ):
        super().__init__()
        self.num_elements = num_elements
        self.dim_feature = dim_feature
        self.num_rbf = num_rbf
        self.rbf_function = rbf_function
        self.rescale_rbf = rescale_rbf
        self.activation = activation
        self.interaction_types = interactions
        # An explicit interaction list overrides the n_interactions count.
        if isinstance(interactions, list):
            self.n_interactions = len(interactions)
        else:
            self.n_interactions = n_interactions
        self.unit_length = unit_length
        units.set_length_unit(self.unit_length)
        self.use_distances = use_distances
        self.use_bonds = False
        self.network_name = 'DeepGraphMolecularModel'
        self.read_all_interactions = False
        # make a lookup table to store embeddings for each element (up to atomic
        # number max_z) each of which is a vector of size dim_feature
        self.atom_embedding = nn.Embedding(
            num_elements,
            dim_feature,
            use_one_hot=True,
            embedding_table=Normal(1.0))
        # Placeholders; subclasses replace these when bonds are used.
        self.bond_embedding = [None,]
        self.bond_filter = [None,]
        self.use_public_filter = use_public_filter
        self.dis_filter = dis_filter
        self.fixed_atoms = False
        # layer for expanding interatomic distances in a basis
        if rbf_function is not None:
            self.rbf_function = rbf_function(
                d_min=min_rbf_dis,
                d_max=max_rbf_dis,
                num_rbf=num_rbf,
                sigma=rbf_sigma,
                trainable=trainable_rbf)
        else:
            self.rbf_function = None
        self.cutoff_network = None
        self.cutoff = None
        if cutoff_network is not None:
            if cutoff is None:
                self.cutoff = max_rbf_dis
            else:
                self.cutoff = cutoff
            self.cutoff_network = get_cutoff(
                cutoff_network,
                r_max=self.cutoff,
                return_mask=True,
                reverse=False)
        # Subclasses are expected to replace this placeholder with a CellList.
        self.interactions = [None,]
        self.interaction_typenames = []
        self.use_graph_norm = use_graph_norm
        self.use_pub_norm = False
        if self.use_graph_norm:
            if self.use_pub_norm:
                # Share one GraphNorm cell across all interaction layers.
                # BUGFIX: the original code multiplied the GraphNorm *cell*
                # by an int ([GraphNorm(...) * n]), which is not list
                # replication; replicate the one-element list instead.
                self.graph_norm = nn.CellList(
                    [GraphNorm(dim_feature)] * self.n_interactions
                )
            else:
                self.graph_norm = nn.CellList(
                    [GraphNorm(dim_feature) for _ in range(self.n_interactions)]
                )
        else:
            self.graph_norm = None
        self.decoder = 'halve'
        self.merge_method = None
        self.far_type = None
        self.zeros = P.Zeros()
        self.ones = P.Ones()
    def print_info(self):
        """Print a human-readable summary of the model configuration."""
        print('---with GNN-based deep molecular model: ', self.network_name)
        print('------with atom embedding size: ' + str(self.num_elements))
        print('------with cutoff distance: ' +
              str(self.cutoff) + ' ' + self.unit_length)
        print('------with number of RBF functions: ' + str(self.num_rbf))
        print('------with bond connection: ' +
              ('Yes' if self.use_bonds else 'No'))
        print('------with feature dimension: ' + str(self.dim_feature))
        print('------with interaction layers:')
        for i, inter in enumerate(self.interactions):
            print('------' + str(i + 1) + '. ' + inter.name +
                  '(' + self.interaction_typenames[i] + ')')
            inter.print_info()
        print('------with total layers: ' + str(len(self.interactions)))
        print('------output all interaction layers: ' +
              ('Yes'if self.read_all_interactions else 'No'))
    def set_fixed_atoms(self, fixed_atoms=True):
        """Mark the atom types as fixed across the batch."""
        self.fixed_atoms = fixed_atoms
    def set_fixed_neighbors(self, flag=True):
        """Propagate the fixed-neighbor flag to every interaction layer."""
        for interaction in self.interactions:
            interaction.set_fixed_neighbors(flag)
    def _calc_cutoffs(
            self,
            r_ij=1,
            neighbor_mask=None,
            bonds=None,
            bond_mask=None,
            atom_mask=None):
        """Return (cutoff values, mask) for the pairwise distances r_ij."""
        self.bonds_t = bonds
        self.bond_mask_t = bond_mask
        self.atom_mask_t = atom_mask
        if self.cutoff_network is None:
            # No cutoff: every neighbor contributes with full weight.
            return F.ones_like(r_ij), neighbor_mask
        return self.cutoff_network(r_ij, neighbor_mask)
    def _get_rbf(self, dis):
        """Expand distances into the RBF basis (optionally filtered)."""
        # expand interatomic distances (for example, Gaussian smearing)
        if self.rbf_function is None:
            rbf = F.expand_dims(dis, -1)
        else:
            rbf = self.rbf_function(dis)
        if self.rescale_rbf:
            # Map RBF values from [0, 1] to [-1, 1].
            rbf = rbf * 2.0 - 1.0
        if self.dis_filter is not None:
            return self.dis_filter(rbf)
        return rbf
    def _get_self_rbf(self):
        """RBF expansion of the self-distance; 0 unless overridden."""
        return 0
    def construct(
            self,
            r_ij=1,
            atom_types=None,
            atom_mask=None,
            neighbors=None,
            neighbor_mask=None,
            bonds=None,
            bond_mask=None):
        """Compute interaction output.
        Args:
            r_ij (ms.Tensor[float], [B, A, N]): interatomic distances of (N_b, N_a, N_nbh) shape.
            neighbors (ms.Tensor[int]): indices of neighbors of (N_b, N_a, N_nbh) shape.
            neighbor_mask (ms.Tensor[bool], optional): mask to filter out non-existing neighbors
                introduced via padding.
            atom_types (ms.Tensor[int], optional): atomic index
        Returns:
            torch.Tensor: block output with (N_b, N_a, N_basis) shape.
        """
        if self.fixed_atoms:
            # Broadcast the single set of atom embeddings over the batch.
            exones = self.ones((r_ij.shape[0], 1, 1), r_ij.dtype)
            e = exones * self.atom_embedding(atom_types)
            if atom_mask is not None:
                atom_mask = (exones * atom_mask) > 0
        else:
            e = self.atom_embedding(atom_types)
        if self.use_distances:
            f_ij = self._get_rbf(r_ij)
            f_ii = self._get_self_rbf()
        else:
            f_ii = 1
            f_ij = 1
        b_ii = 0
        b_ij = 0
        # apply cutoff
        c_ij, mask = self._calc_cutoffs(
            r_ij, neighbor_mask, bonds, bond_mask, atom_mask)
        # continuous-filter convolution interaction block followed by Dense
        # layer
        x = e
        n_interactions = len(self.interactions)
        xlist = []
        for i in range(n_interactions):
            x = self.interactions[i](
                x, e, f_ii, f_ij, b_ii, b_ij, c_ij, neighbors, mask)
            if self.use_graph_norm:
                x = self.graph_norm[i](x)
            if self.read_all_interactions:
                xlist.append(x)
        if self.read_all_interactions:
            return x, xlist
        return x, None
class SchNet(DeepGraphMolecularModel):
    r"""SchNet model.

    A continuous-filter convolutional network for molecules and materials.

    References:
        <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.,
        SchNet - a deep learning architecture for molecules and materials.
        The Journal of Chemical Physics 148 (24), 241722. 2018.

    Args:
        num_elements (int): maximum number of atomic types
        dim_feature (int): dimension of the vectors for atomic embedding
        min_rbf_dis (float): minimum distance for the RBF expansion
        max_rbf_dis (float): maximum distance for the RBF expansion
        num_rbf (int): number of radial basis functions (RBF)
        dim_filter (int): dimension of the filters used in the
            continuous-filter convolution
        n_interactions (int, optional): number of interaction blocks
        activation (nn.Cell): activation function
        unit_length (str): length unit for distances
        rbf_sigma (float, optional): width parameter of the RBF
        rbf_function (nn.Cell, optional): the algorithm to calculate RBF
        cutoff (float, optional): cutoff distance
        cutoff_network (nn.Cell, optional): the algorithm to calculate cutoff
        normalize_filter (bool, optional): if True, divide the aggregated
            filter by the number of neighbors over which the convolution is
            applied
        coupled_interactions (bool, optional): if True, share the weights
            across interaction blocks and filter-generating networks
        trainable_rbf (bool, optional): if True, widths and offsets of the
            RBF are adjusted during training
        use_graph_norm (bool): apply graph normalization after each layer
    """
    def __init__(
            self,
            num_elements=100,
            dim_feature=64,
            min_rbf_dis=0.02,
            max_rbf_dis=0.5,
            num_rbf=32,
            dim_filter=64,
            n_interactions=3,
            activation=ShiftedSoftplus(),
            unit_length='nm',
            rbf_sigma=None,
            rbf_function=GaussianSmearing,
            cutoff=None,
            cutoff_network='cosine',
            normalize_filter=False,
            coupled_interactions=False,
            trainable_rbf=False,
            use_graph_norm=False,
    ):
        super().__init__(
            num_elements=num_elements,
            dim_feature=dim_feature,
            min_rbf_dis=min_rbf_dis,
            max_rbf_dis=max_rbf_dis,
            num_rbf=num_rbf,
            n_interactions=n_interactions,
            activation=activation,
            unit_length=unit_length,
            rbf_sigma=rbf_sigma,
            rbf_function=rbf_function,
            cutoff=cutoff,
            cutoff_network=cutoff_network,
            rescale_rbf=False,
            use_public_filter=False,
            use_graph_norm=use_graph_norm,
            trainable_rbf=trainable_rbf,
        )
        self.network_name = 'SchNet'

        def build_interaction():
            # One continuous-filter convolution interaction block.
            return SchNetInteraction(
                dim_feature=dim_feature,
                num_rbf=num_rbf,
                dim_filter=dim_filter,
                activation=self.activation,
                normalize_filter=normalize_filter,
            )

        if coupled_interactions:
            # A single shared instance: every layer reuses the same weights.
            self.interaction_typenames = ['D0'] * self.n_interactions
            self.interactions = nn.CellList(
                [build_interaction()] * self.n_interactions)
        else:
            # Independent weights for every interaction layer.
            self.interaction_typenames = [
                'D' + str(i) for i in range(self.n_interactions)]
            self.interactions = nn.CellList(
                [build_interaction() for _ in range(self.n_interactions)])
class PhysNet(DeepGraphMolecularModel):
    r"""PhysNet model.

    References:
        <NAME>. and <NAME>.,
        PhysNet: A neural network for predicting energies, forces, dipole moments, and partial charges.
        The Journal of Chemical Theory and Computation 2019, 15(6), 3678-3693.

    Args:
        num_elements (int): maximum number of atomic types
        min_rbf_dis (float): minimum distance for the RBF expansion
        max_rbf_dis (float): maximum distance for the RBF expansion
        num_rbf (int): number of radial basis functions (RBF)
        dim_feature (int): dimension of the vectors for atomic embedding
        n_interactions (int, optional): number of interaction modules
        n_inter_residual (int): number of residual blocks inside each module
        n_outer_residual (int): number of residual blocks after each module
        unit_length (str): length unit for distances
        activation (nn.Cell): activation function
        rbf_sigma (float, optional): width parameter of the RBF
        rbf_function (nn.Cell, optional): the algorithm to calculate RBF
        cutoff (float, optional): cutoff distance
        cutoff_network (nn.Cell, optional): the algorithm to calculate cutoff
        use_graph_norm (bool): apply graph normalization after each layer
        coupled_interactions (bool, optional): if True, share the weights
            across interaction modules
        trainable_rbf (bool, optional): if True, RBF parameters are adjusted
            during training
    """
    def __init__(
            self,
            num_elements=100,
            min_rbf_dis=0.02,
            max_rbf_dis=1,
            num_rbf=64,
            dim_feature=128,
            n_interactions=5,
            n_inter_residual=3,
            n_outer_residual=2,
            unit_length='nm',
            activation=ShiftedSoftplus(),
            rbf_sigma=None,
            rbf_function=GaussianSmearing,
            cutoff=None,
            cutoff_network='smooth',
            use_graph_norm=False,
            coupled_interactions=False,
            trainable_rbf=False,
    ):
        super().__init__(
            num_elements=num_elements,
            dim_feature=dim_feature,
            min_rbf_dis=min_rbf_dis,
            max_rbf_dis=max_rbf_dis,
            num_rbf=num_rbf,
            n_interactions=n_interactions,
            activation=activation,
            rbf_sigma=rbf_sigma,
            unit_length=unit_length,
            rbf_function=rbf_function,
            cutoff=cutoff,
            cutoff_network=cutoff_network,
            rescale_rbf=False,
            use_graph_norm=use_graph_norm,
            use_public_filter=False,
            trainable_rbf=trainable_rbf,
        )
        self.network_name = 'PhysNet'

        def build_module():
            # One PhysNet interaction module with its residual stacks.
            return PhysNetModule(
                num_rbf=num_rbf,
                dim_feature=dim_feature,
                activation=self.activation,
                n_inter_residual=n_inter_residual,
                n_outer_residual=n_outer_residual,
            )

        if coupled_interactions:
            # One shared module reused for every layer (tied weights).
            self.interaction_typenames = ['D0'] * self.n_interactions
            self.interactions = nn.CellList(
                [build_module()] * self.n_interactions)
        else:
            # Independent weights per layer.
            self.interaction_typenames = [
                'D' + str(i) for i in range(self.n_interactions)]
            self.interactions = nn.CellList(
                [build_module() for _ in range(self.n_interactions)])
        self.readout = None
    def set_fixed_neighbors(self, flag=True):
        """Propagate the fixed-neighbor flag to every interaction module."""
        for module in self.interactions:
            module.set_fixed_neighbors(flag)
class MolCT(DeepGraphMolecularModel):
    r"""Molecular Configuration Transformer (MolCT) Model

    References:
        <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.,
        Molecular CT: unifying geometry and representation learning for molecules at different scales
        ArXiv: 2012.11816

    Args:
        num_elements (int): maximum number of atomic types
        min_rbf_dis (float): minimum distance for the RBF expansion
        max_rbf_dis (float): maximum distance for the RBF expansion
        num_rbf (int): number of radial basis functions (RBF)
        dim_feature (int): dimension of the vectors for atomic embedding
        n_interactions (int): number of interaction layers (ignored when
            ``interactions`` is given as a list)
        interactions (list, optional): explicit list of interaction types,
            each one of 'dis', 'bond' or 'mix'
        n_heads (int): number of attention heads
        max_cycles (int): maximum number of adaptive computation cycles
        activation (nn.Cell): activation function
        self_dis (float, optional): self-distance used for the diagonal
            (defaults to ``min_rbf_dis``)
    """
    def __init__(
            self,
            num_elements=100,
            min_rbf_dis=0.05,
            max_rbf_dis=1,
            num_rbf=32,
            dim_feature=64,
            n_interactions=3,
            interactions=None,
            n_heads=8,
            max_cycles=10,
            activation=Swish(),
            unit_length='nm',
            self_dis=None,
            rbf_sigma=None,
            rbf_function=LogGaussianDistribution,
            cutoff=None,
            cutoff_network='smooth',
            use_distances=True,
            use_bonds=False,
            num_bond_types=16,
            public_dis_filter=True,
            public_bond_filter=True,
            use_feed_forward=False,
            trainable_gaussians=False,
            use_pondering=True,
            fixed_cycles=False,
            rescale_rbf=True,
            use_graph_norm=False,
            use_time_embedding=True,
            coupled_interactions=False,
            use_mcr=False,
            debug=False,
    ):
        super().__init__(
            num_elements=num_elements,
            dim_feature=dim_feature,
            min_rbf_dis=min_rbf_dis,
            max_rbf_dis=max_rbf_dis,
            n_interactions=n_interactions,
            interactions=interactions,
            activation=activation,
            num_rbf=num_rbf,
            unit_length=unit_length,
            rbf_sigma=rbf_sigma,
            rbf_function=rbf_function,
            cutoff=cutoff,
            cutoff_network=cutoff_network,
            rescale_rbf=rescale_rbf,
            use_graph_norm=use_graph_norm,
            use_public_filter=public_dis_filter,
        )
        self.network_name = 'MolCT'
        self.max_distance = max_rbf_dis
        self.min_distance = min_rbf_dis
        self.use_distances = use_distances
        # NOTE(review): trainable_gaussians is stored but never forwarded to
        # the base class (which receives its default trainable_rbf=False);
        # confirm whether this is intended.
        self.trainable_gaussians = trainable_gaussians
        self.use_mcr = use_mcr
        self.debug = debug
        if self_dis is None:
            self.self_dis = self.min_distance
        else:
            self.self_dis = self_dis
        self.self_dis_tensor = Tensor([self.self_dis], ms.float32)
        self.n_heads = n_heads
        if use_time_embedding:
            time_embedding = self._get_time_signal(max_cycles, dim_feature)
        else:
            time_embedding = [0 for _ in range(max_cycles)]
        self.use_bonds = use_bonds
        use_dis_inter = False
        use_bond_inter = False
        use_mix_inter = False
        # The explicit interaction list, when given, decides which feature
        # channels (distances / bonds) the model consumes.
        if self.interaction_types is not None:
            self.use_distances = False
            self.use_bonds = False
            for itype in self.interaction_types:
                if itype == 'dis':
                    use_dis_inter = True
                    self.use_distances = True
                elif itype == 'bond':
                    use_bond_inter = True
                    self.use_bonds = True
                elif itype == 'mix':
                    use_mix_inter = True
                    self.use_distances = True
                    self.use_bonds = True
                else:
                    raise ValueError(
                        '"interactions" must be "dis", "bond" or "mix"')
        else:
            if self.use_distances and self.use_bonds:
                use_mix_inter = True
            elif self.use_distances:
                use_dis_inter = True
            elif self.use_bonds:
                use_bond_inter = True
            else:
                raise ValueError(
                    '"use_bonds" and "use_distances" cannot be both "False"!')
        inter_bond_filter = False
        if self.use_bonds:
            self.bond_embedding = nn.Embedding(
                num_bond_types,
                dim_feature,
                use_one_hot=True,
                embedding_table=Normal(1.0))
            if public_bond_filter:
                self.bond_filter = Residual(dim_feature, activation=activation)
            else:
                # Each interaction unit owns its private bond filter.
                inter_bond_filter = True
        inter_dis_filter = False
        if self.use_distances:
            if self.use_public_filter:
                self.dis_filter = ResFilter(
                    num_rbf, dim_feature, self.activation)
            else:
                # Each interaction unit owns its private distance filter.
                self.dis_filter = Dense(
                    num_rbf, dim_feature, has_bias=True, activation=None)
                inter_dis_filter = True
        interaction_list = []
        if coupled_interactions:
            # Shared-weight mode: one unit per interaction kind, reused.
            if use_dis_inter:
                self.dis_interaction = NeuralInteractionUnit(
                    dim_feature=dim_feature,
                    num_rbf=num_rbf,
                    n_heads=n_heads,
                    activation=self.activation,
                    max_cycles=max_cycles,
                    time_embedding=time_embedding,
                    use_pondering=use_pondering,
                    use_distances=True,
                    use_bonds=False,
                    use_dis_filter=inter_dis_filter,
                    use_bond_filter=False,
                    fixed_cycles=fixed_cycles,
                    use_feed_forward=use_feed_forward,
                )
            else:
                self.dis_interaction = None
            if use_bond_inter:
                self.bond_interaction = NeuralInteractionUnit(
                    dim_feature=dim_feature,
                    num_rbf=num_rbf,
                    n_heads=n_heads,
                    activation=self.activation,
                    max_cycles=max_cycles,
                    time_embedding=time_embedding,
                    use_pondering=use_pondering,
                    use_distances=False,
                    use_bonds=True,
                    use_dis_filter=False,
                    use_bond_filter=inter_bond_filter,
                    fixed_cycles=fixed_cycles,
                    use_feed_forward=use_feed_forward,
                )
            else:
                self.bond_interaction = None
            if use_mix_inter:
                self.mix_interaction = NeuralInteractionUnit(
                    dim_feature=dim_feature,
                    num_rbf=num_rbf,
                    n_heads=n_heads,
                    activation=self.activation,
                    max_cycles=max_cycles,
                    time_embedding=time_embedding,
                    use_pondering=use_pondering,
                    use_distances=True,
                    use_bonds=True,
                    use_dis_filter=inter_dis_filter,
                    use_bond_filter=inter_bond_filter,
                    fixed_cycles=fixed_cycles,
                    use_feed_forward=use_feed_forward,
                )
            else:
                self.mix_interaction = None
            if self.interaction_types is not None:
                for inter in self.interaction_types:
                    if inter == 'dis':
                        interaction_list.append(self.dis_interaction)
                        self.interaction_typenames.append('D0')
                    elif inter == 'bond':
                        interaction_list.append(self.bond_interaction)
                        self.interaction_typenames.append('B0')
                    else:
                        interaction_list.append(self.mix_interaction)
                        self.interaction_typenames.append('M0')
            else:
                # BUGFIX: the original code multiplied the interaction *cell*
                # by an int inside a one-element list
                # ([self.dis_interaction * n]); the intent is to replicate the
                # shared cell n times: [cell] * n.
                if use_dis_inter:
                    interaction_list = [
                        self.dis_interaction] * self.n_interactions
                    self.interaction_typenames = ['D0',] * self.n_interactions
                elif use_bond_inter:
                    interaction_list = [
                        self.bond_interaction] * self.n_interactions
                    self.interaction_typenames = ['B0',] * self.n_interactions
                else:
                    interaction_list = [
                        self.mix_interaction] * self.n_interactions
                    self.interaction_typenames = ['M0',] * self.n_interactions
        else:
            # Independent weights: construct a fresh unit per layer.
            if self.interaction_types is not None:
                did = 0
                bid = 0
                mid = 0
                for inter in self.interaction_types:
                    use_distances = False
                    use_bonds = False
                    use_dis_filter = False
                    use_bond_filter = False
                    if inter == 'dis':
                        use_distances = True
                        use_dis_filter = inter_dis_filter
                        self.interaction_typenames.append('D' + str(did))
                        did += 1
                    elif inter == 'bond':
                        use_bonds = True
                        self.interaction_typenames.append('B' + str(bid))
                        use_bond_filter = inter_bond_filter
                        bid += 1
                    elif inter == 'mix':
                        use_distances = True
                        use_bonds = True
                        use_dis_filter = inter_dis_filter
                        use_bond_filter = inter_bond_filter
                        self.interaction_typenames.append('M' + str(mid))
                        mid += 1
                    interaction_list.append(
                        NeuralInteractionUnit(
                            dim_feature=dim_feature,
                            num_rbf=num_rbf,
                            n_heads=n_heads,
                            activation=self.activation,
                            max_cycles=max_cycles,
                            time_embedding=time_embedding,
                            use_pondering=use_pondering,
                            use_distances=use_distances,
                            use_bonds=use_bonds,
                            use_dis_filter=use_dis_filter,
                            use_bond_filter=use_bond_filter,
                            fixed_cycles=fixed_cycles,
                            use_feed_forward=use_feed_forward,
                        )
                    )
            else:
                if use_dis_inter:
                    t = 'D'
                elif use_bond_inter:
                    t = 'B'
                else:
                    t = 'M'
                self.interaction_typenames = [
                    t + str(i) for i in range(self.n_interactions)]
                interaction_list = [
                    NeuralInteractionUnit(
                        dim_feature=dim_feature,
                        num_rbf=num_rbf,
                        n_heads=n_heads,
                        activation=self.activation,
                        max_cycles=max_cycles,
                        time_embedding=time_embedding,
                        use_pondering=use_pondering,
                        use_distances=self.use_distances,
                        use_bonds=self.use_bonds,
                        use_dis_filter=inter_dis_filter,
                        use_bond_filter=inter_bond_filter,
                        fixed_cycles=fixed_cycles,
                        use_feed_forward=use_feed_forward,
                    )
                    for i in range(self.n_interactions)
                ]
        self.n_interactions = len(interaction_list)
        self.interactions = nn.CellList(interaction_list)
        self.lmax_label = []
        # NOTE(review): this uses the constructor argument n_interactions, not
        # the recomputed self.n_interactions; the two can differ when an
        # explicit interaction list is given -- confirm which is intended.
        for i in range(n_interactions):
            self.lmax_label.append('l' + str(i) + '_cycles')
        self.fill = P.Fill()
        self.concat = P.Concat(-1)
        self.reducesum = P.ReduceSum()
        self.reducemax = P.ReduceMax()
        self.tensor_summary = P.TensorSummary()
        self.scalar_summary = P.ScalarSummary()
    def set_fixed_neighbors(self, flag=True):
        """Propagate the fixed-neighbor flag to every interaction layer."""
        for interaction in self.interactions:
            interaction.set_fixed_neighbors(flag)
    def _calc_cutoffs(
            self,
            r_ij=1,
            neighbor_mask=None,
            bonds=None,
            bond_mask=None,
            atom_mask=None):
        """Return (cutoff values, mask), prepending the self-distance column."""
        mask = None
        if self.use_distances:
            if neighbor_mask is not None:
                mask = self.concat((atom_mask, neighbor_mask))
            if self.cutoff_network is None:
                new_shape = (r_ij.shape[0], r_ij.shape[1] + 1, r_ij.shape[2])
                return self.fill(r_ij.dtype, new_shape, 1.0), mask
            rii_shape = r_ij.shape[:-1] + (1,)
            r_ii = self.fill(r_ij.dtype, rii_shape, self.self_dis)
            if atom_mask is not None:
                # Push masked-out atoms far beyond the cutoff.
                r_large = F.ones_like(r_ii) * 5e4
                r_ii = F.select(atom_mask, r_ii, r_large)
            # [B, A, N']
            r_ij = self.concat((r_ii, r_ij))
            return self.cutoff_network(r_ij, mask)
        if bond_mask is not None:
            mask = self.concat((atom_mask, bond_mask))
        return F.cast(mask > 0, ms.float32), mask
    def _get_self_rbf(self):
        """RBF expansion of the configured self-distance."""
        f_ii = self._get_rbf(self.self_dis_tensor)
        return f_ii
    def _get_time_signal(
            self,
            length,
            channels,
            min_timescale=1.0,
            max_timescale=1.0e4):
        """
        Generates a [1, length, channels] timing signal consisting of sinusoids
        Adapted from:
        https://github.com/andreamad8/Universal-Transformer-Pytorch/blob/master/models/common_layer.py
        """
        position = np.arange(length)
        num_timescales = channels // 2
        log_timescale_increment = (np.log(
            float(max_timescale) / float(min_timescale)) / (float(num_timescales) - 1))
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        inv_timescales = min_timescale * \
            np.exp(np.arange(num_timescales).astype(float) * -log_timescale_increment)
        scaled_time = np.expand_dims(
            position, 1) * np.expand_dims(inv_timescales, 0)
        signal = np.concatenate(
            [np.sin(scaled_time), np.cos(scaled_time)], axis=1)
        signal = np.pad(signal, [[0, 0], [0, channels % 2]],
                        'constant', constant_values=[0.0, 0.0])
        return Tensor(signal, ms.float32)
| [
"mindspore.ops.functional.select",
"mindspore.ops.functional.expand_dims",
"mindspore.Tensor",
"mindspore.ops.operations.ScalarSummary",
"numpy.sin",
"numpy.arange",
"mindspore.ops.operations.Fill",
"mindspore.ops.operations.Concat",
"numpy.pad",
"mindspore.ops.operations.ReduceSum",
"mindspore.... | [((5609, 5618), 'mindspore.ops.operations.Zeros', 'P.Zeros', ([], {}), '()\n', (5616, 5618), True, 'from mindspore.ops import operations as P\n'), ((5639, 5647), 'mindspore.ops.operations.Ones', 'P.Ones', ([], {}), '()\n', (5645, 5647), True, 'from mindspore.ops import operations as P\n'), ((20335, 20370), 'mindspore.Tensor', 'Tensor', (['[self.self_dis]', 'ms.float32'], {}), '([self.self_dis], ms.float32)\n', (20341, 20370), False, 'from mindspore import Tensor\n'), ((29717, 29746), 'mindspore.nn.CellList', 'nn.CellList', (['interaction_list'], {}), '(interaction_list)\n', (29728, 29746), False, 'from mindspore import nn\n'), ((29899, 29907), 'mindspore.ops.operations.Fill', 'P.Fill', ([], {}), '()\n', (29905, 29907), True, 'from mindspore.ops import operations as P\n'), ((29930, 29942), 'mindspore.ops.operations.Concat', 'P.Concat', (['(-1)'], {}), '(-1)\n', (29938, 29942), True, 'from mindspore.ops import operations as P\n'), ((29968, 29981), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (29979, 29981), True, 'from mindspore.ops import operations as P\n'), ((30007, 30020), 'mindspore.ops.operations.ReduceMax', 'P.ReduceMax', ([], {}), '()\n', (30018, 30020), True, 'from mindspore.ops import operations as P\n'), ((30051, 30068), 'mindspore.ops.operations.TensorSummary', 'P.TensorSummary', ([], {}), '()\n', (30066, 30068), True, 'from mindspore.ops import operations as P\n'), ((30099, 30116), 'mindspore.ops.operations.ScalarSummary', 'P.ScalarSummary', ([], {}), '()\n', (30114, 30116), True, 'from mindspore.ops import operations as P\n'), ((31804, 31821), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (31813, 31821), True, 'import numpy as np\n'), ((32339, 32427), 'numpy.pad', 'np.pad', (['signal', '[[0, 0], [0, channels % 2]]', '"""constant"""'], {'constant_values': '[0.0, 0.0]'}), "(signal, [[0, 0], [0, channels % 2]], 'constant', constant_values=[\n 0.0, 0.0])\n", (32345, 32427), True, 'import numpy as 
np\n'), ((32463, 32489), 'mindspore.Tensor', 'Tensor', (['signal', 'ms.float32'], {}), '(signal, ms.float32)\n', (32469, 32489), False, 'from mindspore import Tensor\n'), ((7516, 7538), 'mindspore.ops.functional.expand_dims', 'F.expand_dims', (['dis', '(-1)'], {}), '(dis, -1)\n', (7529, 7538), True, 'from mindspore.ops import functional as F\n'), ((31268, 31296), 'mindspore.ops.functional.cast', 'F.cast', (['(mask > 0)', 'ms.float32'], {}), '(mask > 0, ms.float32)\n', (31274, 31296), True, 'from mindspore.ops import functional as F\n'), ((32147, 32174), 'numpy.expand_dims', 'np.expand_dims', (['position', '(1)'], {}), '(position, 1)\n', (32161, 32174), True, 'import numpy as np\n'), ((32190, 32223), 'numpy.expand_dims', 'np.expand_dims', (['inv_timescales', '(0)'], {}), '(inv_timescales, 0)\n', (32204, 32223), True, 'import numpy as np\n'), ((3929, 3940), 'mindspore.common.initializer.Normal', 'Normal', (['(1.0)'], {}), '(1.0)\n', (3935, 3940), False, 'from mindspore.common.initializer import Normal\n'), ((7246, 7263), 'mindspore.ops.functional.ones_like', 'F.ones_like', (['r_ij'], {}), '(r_ij)\n', (7257, 7263), True, 'from mindspore.ops import functional as F\n'), ((31007, 31041), 'mindspore.ops.functional.select', 'F.select', (['atom_mask', 'r_ii', 'r_large'], {}), '(atom_mask, r_ii, r_large)\n', (31015, 31041), True, 'from mindspore.ops import functional as F\n'), ((32271, 32290), 'numpy.sin', 'np.sin', (['scaled_time'], {}), '(scaled_time)\n', (32277, 32290), True, 'import numpy as np\n'), ((32292, 32311), 'numpy.cos', 'np.cos', (['scaled_time'], {}), '(scaled_time)\n', (32298, 32311), True, 'import numpy as np\n'), ((22044, 22055), 'mindspore.common.initializer.Normal', 'Normal', (['(1.0)'], {}), '(1.0)\n', (22050, 22055), False, 'from mindspore.common.initializer import Normal\n'), ((30960, 30977), 'mindspore.ops.functional.ones_like', 'F.ones_like', (['r_ii'], {}), '(r_ii)\n', (30971, 30977), True, 'from mindspore.ops import functional as F\n'), ((32054, 
32079), 'numpy.arange', 'np.arange', (['num_timescales'], {}), '(num_timescales)\n', (32063, 32079), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import collections
import collections.abc
import contextlib
import functools
import itertools
import os
from abc import ABCMeta, abstractmethod, abstractproperty
from functools import wraps
from threading import Lock
from warnings import warn

import numpy as np
import six
from six import with_metaclass
from six.moves import range

from .frame import Frame
class FramesStream(with_metaclass(ABCMeta, object)):
    """
    A base class for wrapping input data which knows how to
    advance to the next frame, but does not have random access.
    The length does not need to be finite.
    Does not support slicing.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def __iter__(self):
        pass
    @abstractproperty
    def pixel_type(self):
        """Returns a numpy.dtype for the data type of the pixel values"""
        pass
    @abstractproperty
    def frame_shape(self):
        """Returns the shape of a single frame as a tuple ex (10, 12)"""
        pass
    @classmethod
    def class_exts(cls):
        """
        Return a set of the file extensions that this reader can deal with.
        Sub-classes should over-ride this function to list what extensions
        they deal with.
        The default interpretation of the returned set is 'file
        extensions including but not exclusively'.
        """
        return set()
    @property
    def exts(self):
        """
        Property to get the extensions of a FramesStream class.
        Calls relevant classmethod.
        """
        # BUGFIX: the classmethod is named class_exts (plural); calling the
        # non-existent class_ext() raised AttributeError on every access.
        return type(self).class_exts()
    def close(self):
        """
        A method to clean up anything that need to be cleaned up.
        Sub-classes should use super to call up the MRO stack and then
        do any class-specific clean up
        """
        pass
    def _validate_process_func(self, process_func):
        """Store process_func, defaulting to identity; reject non-callables."""
        if process_func is None:
            process_func = lambda x: x
        if not callable(process_func):
            raise ValueError("process_func must be a function, or None")
        self.process_func = process_func
    def _as_grey(self, as_grey, process_func):
        """Install a greyscale-conversion process_func when as_grey is set.

        Raises ValueError if both as_grey and process_func are given, or
        NotImplementedError for frame shapes with no recognizable color axis.
        """
        # See skimage.color.colorconv in the scikit-image project.
        # As noted there, the weights used in this conversion are calibrated
        # for contemporary CRT phosphors. Any alpha channel is ignored."""
        if as_grey:
            if process_func is not None:
                raise ValueError("The as_grey option cannot be used when "
                                 "process_func is specified. Incorpate "
                                 "greyscale conversion in the function "
                                 "passed to process_func.")
            shape = self.frame_shape
            ndim = len(shape)
            # Look for dimensions that look like color channels.
            rgb_like = shape.count(3) == 1
            rgba_like = shape.count(4) == 1
            if ndim == 2:
                # The image is already greyscale: nothing to install.
                process_func = None
            elif ndim == 3 and (rgb_like or rgba_like):
                reduced_shape = list(shape)
                if rgb_like:
                    color_axis_size = 3
                    calibration = [0.2125, 0.7154, 0.0721]
                else:
                    color_axis_size = 4
                    # Zero weight discards the alpha channel.
                    calibration = [0.2125, 0.7154, 0.0721, 0]
                reduced_shape.remove(color_axis_size)
                self._im_sz = tuple(reduced_shape)
                def convert_to_grey(img):
                    # Move the color axis last, then take the weighted sum.
                    color_axis = img.shape.index(color_axis_size)
                    img = np.rollaxis(img, color_axis, 3)
                    grey = (img * calibration).sum(2)
                    return grey.astype(img.dtype)  # coerce to original dtype
                self.process_func = convert_to_grey
            else:
                raise NotImplementedError("I don't know how to convert an "
                                          "image of shaped {0} to greyscale. "
                                          "Write you own function and pass "
                                          "it using the process_func "
                                          "keyword argument.".format(shape))
    # magic functions to make all sub-classes usable as context managers
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def __repr__(self):
        # May be overwritten by subclasses
        return """<Frames>
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
                                  h=self.frame_shape[1],
                                  dtype=self.pixel_type)
class SliceableIterable(object):
    def __init__(self, ancestor, indices, length=None):
        """A generator that supports fancy indexing
        When sliced using any iterable with a known length, it return another
        object like itself, a SliceableIterable. When sliced with an integer,
        it returns the data payload.
        Also, this retains the attributes of the ultimate ancestor that
        created it (or its parent, or its parent's parent, ...).
        Parameters
        ----------
        ancestor : object
            must support __getitem__ with an integer argument
        indices : iterable
            giving indices into `ancestor`
        length : integer, optional
            length of indicies
            This is required if `indices` is a generator,
            that is, if `len(indices)` is invalid
        Examples
        --------
        # Slicing on a SliceableIterable returns another SliceableIterable...
        >>> v = SliceableIterable([0, 1, 2, 3], range(4), 4)
        >>> v1 = v[:2]
        >>> type(v[:2])
        SliceableIterable
        >>> v2 = v[::2]
        >>> type(v2)
        SliceableIterable
        >>> v2[0]
        0
        # ...unless the slice itself has an unknown length, which makes
        # slicing impossible.
        >>> v3 = v2((i for i in [0]))  # argument is a generator
        >>> type(v3)
        generator
        """
        if length is None:
            try:
                length = len(indices)
            except TypeError:
                raise ValueError("The length parameter is required in this "
                                 "case because len(indices) is not valid.")
        self._len = length
        self._ancestor = ancestor
        self._indices = indices
        self._counter = 0
        # Identity by default; subclasses/pipelines may replace this.
        self._proc_func = lambda image: image
    @property
    def indices(self):
        """A fresh iterator over the index sequence (non-destructive)."""
        # Advancing indices won't affect this new copy of self._indices.
        indices, self._indices = itertools.tee(iter(self._indices))
        return indices
    def _get(self, key):
        "Wrap ancestor's get_frame method in a processing function."
        return self._proc_func(self._ancestor[key])
    def __repr__(self):
        msg = "Sliced and/or processed {0}. Original repr:\n".format(
            type(self._ancestor).__name__)
        old = '\n'.join("    " + ln for ln in repr(self._ancestor).split('\n'))
        return msg + old
    def __iter__(self):
        return (self._get(i) for i in self.indices)
    def __len__(self):
        return self._len
    def __getattr__(self, key):
        # Remember this only gets called if __getattribute__ raises an
        # AttributeError. Try the ancestor object.
        att = getattr(self._ancestor, key)
        if hasattr(self._ancestor, '_use_indices') and callable(att):
            # FramesSequenceMappable objects support frame number mapping
            # within ancestor methods
            @functools.wraps(att)
            def mapped(*args, **kwargs):
                with self._ancestor._use_indices(self.indices):
                    return att(*args, **kwargs)
            return mapped
        else:
            return att
    def __getitem__(self, key):
        """for data access"""
        _len = len(self)
        abs_indices = self.indices
        if isinstance(key, slice):
            # if input is a slice, return another SliceableIterable
            start, stop, step = key.indices(_len)
            rel_indices = range(start, stop, step)
            new_length = len(rel_indices)
            indices = _index_generator(rel_indices, abs_indices)
            return SliceableIterable(self._ancestor, indices, new_length)
        elif isinstance(key, collections.abc.Iterable):
            # BUGFIX: collections.Iterable was removed in Python 3.10; the
            # ABC lives in collections.abc.
            # if the input is an iterable, doing 'fancy' indexing
            if isinstance(key, np.ndarray) and key.dtype == bool:
                # BUGFIX: np.bool was removed in NumPy 1.24; the builtin bool
                # compares equal to the boolean dtype.
                # if we have a bool array, set up masking but defer
                # the actual computation, returning another SliceableIterable
                rel_indices = np.arange(len(self))[key]
                indices = _index_generator(rel_indices, abs_indices)
                new_length = key.sum()
                return SliceableIterable(self._ancestor, indices, new_length)
            if any(_k < -_len or _k >= _len for _k in key):
                raise IndexError("Keys out of range")
            try:
                new_length = len(key)
            except TypeError:
                # The key is a generator; return a plain old generator.
                # Without knowing the length of the *key*,
                # we can't give a SliceableIterable
                gen = (self[_k if _k >= 0 else _len + _k] for _k in key)
                return gen
            else:
                # The key is a list of in-range values. Build another
                # SliceableIterable, again deferring computation.
                rel_indices = ((_k if _k >= 0 else _len + _k) for _k in key)
                indices = _index_generator(rel_indices, abs_indices)
                return SliceableIterable(self._ancestor, indices, new_length)
        else:
            if key < -_len or key >= _len:
                raise IndexError("Key out of range")
            try:
                abs_key = self._indices[key]
            except TypeError:
                # _indices is a bare iterator: advance it to the key'th item.
                key = key if key >= 0 else _len + key
                rel_indices, self._indices = itertools.tee(self._indices)
                for _, i in zip(range(key + 1), rel_indices):
                    abs_key = i
            return self._get(abs_key)
    def close(self):
        "Closing this child slice of the original reader does nothing."
        pass
class FramesSequence(FramesStream):
"""Baseclass for wrapping data buckets that have random access.
Support random access.
Supports standard slicing and fancy slicing, but returns a
generator.
Must be finite length.
"""
def __getitem__(self, key):
"""If getting a scalar, a specific frame, call get_frame. Otherwise,
be 'lazy' and defer to the slicing logic of SliceableIterable."""
if isinstance(key, int):
i = key if key >= 0 else len(self) + key
return self.get_frame(i)
else:
return SliceableIterable(self, range(len(self)), len(self))[key]
def __iter__(self):
return iter(self[:])
@abstractmethod
def __len__(self):
"""
It is obligatory that sub-classes define a length.
"""
pass
@abstractmethod
def get_frame(self, ind):
"""
Sub classes must over-ride this function for how to get a given
frame out of the file. Any data-type specific internal-state
nonsense should be dealt with in this function.
"""
pass
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
h=self.frame_shape[1],
count = len(self),
dtype=self.pixel_type)
class FramesSequenceMappable(FramesSequence):
"""Version of FramesSequence that allows SliceableIterable objects
to temporarily remap its indices. This is to allow methods like get_time()
to use the same index mapping that's used for frame images.
This feature is nominally thread-safe.
"""
def __init__(self):
super(FramesSequenceMappable, self).__init__()
# Allow frame numbers to be temporarily re-mapped.
self._indexing_lock = Lock()
self._indices = None
@contextlib.contextmanager
def _use_indices(self, indices=None):
"""Context manager to temporarily re-assign indices.
Only affects methods that are index-aware.
"""
self._indexing_lock.acquire()
try:
self._indices = list(indices)
yield
finally:
self._indices = None
self._indexing_lock.release()
def _all_indices(self):
"""Returns iterable of all indices.
Affected by _use_indices()
"""
if self._indices is None:
return range(len(self))
else:
return self._indices[:]
def _map_index(self, index):
"""Returns absolute frame number corresponding to supplied index.
Affected by _use_indices()
"""
if self._indices is None:
return index
else:
return self._indices[index]
class FrameRewindableStream(FramesStream):
"""
A base class for holding the common code for
wrapping data sources that do not rewind easily.
"""
@abstractmethod
def rewind(self, j=0):
"""
Resets the stream to frame j
j : int
Frame to rewind the stream to
"""
pass
@abstractmethod
def skip_forward(self, j):
"""
Skip the stream forward by j frames.
j : int
Number of frames to skip
"""
pass
@abstractmethod
def next(self):
"""
return the next frame in the stream
"""
pass
@abstractmethod
def __len__(self):
pass
@abstractproperty
def current(self):
"""
The current location in the stream.
Can be an int if in stream or None if out the end.
"""
pass
def __iter__(self):
self.rewind(0)
return self
def __getitem__(self, arg):
"""
Returns a generator which yields frames
"""
if isinstance(arg, slice):
# get value from slice
start, stop, step = arg.start, arg.stop, arg.step
# sanitize step
if step is None:
step = 1
if step < 1:
raise ValueError("step must be positive")
# make sure the stream is in the right place to start
if start is None:
start = 0
if start < self.current:
self.rewind(start)
if start > self.current:
self.skip_forward(start - self.current)
# sanity check
if stop is not None and stop < start:
raise ValueError("start must be less than stop")
# special case, we can't just return self, because __iter__ rewinds
if step == 1 and stop is None:
# keep going until exhausted
return (self.next() for _ in itertools.repeat(True))
return self._step_gen(step, stop)
elif isinstance(arg, int):
self.rewind(arg)
return self.next()
else:
raise ValueError("Invalid arguement, use either a `slice` or " +
"or an `int`. not {t}".format(t=str(type(arg))))
def _step_gen(self, step, stop):
"""
Wraps up the logic of stepping forward by step > 1
"""
while stop is None or self.current < stop:
yield self.next()
self.skip_forward(step - 1)
else:
raise StopIteration
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
h=self.frame_shape[1],
count = len(self),
dtype=self.pixel_type)
def _index_generator(new_indices, old_indices):
"""Find locations of new_indicies in the ref. frame of the old_indices.
Example: (1, 3), (1, 3, 5, 10) -> (3, 10)
The point of all this trouble is that this is done lazily, returning
a generator without actually looping through the inputs."""
# Use iter() to be safe. On a generator, this returns an identical ref.
new_indices = iter(new_indices)
n = next(new_indices)
last_n = None
done = False
while True:
old_indices_, old_indices = itertools.tee(iter(old_indices))
for i, o in enumerate(old_indices_):
# If new_indices is not strictly monotonically increasing, break
# and start again from the beginning of old_indices.
if last_n is not None and n <= last_n:
last_n = None
break
if done:
raise StopIteration
if i == n:
last_n = n
try:
n = next(new_indices)
except StopIteration:
done = True
# Don't stop yet; we still have one last thing to yield.
yield o
else:
continue
def pipeline(func):
"""Decorator to make function aware of pims objects.
When the function is applied to a pims reader or a slice of one, it
returns another lazily-evaluated, sliceable object.
When the function is applied to any other object, it falls back on its
normal behavhior.
Parameters
----------
func : callable
function that accepts an image as its first argument
Returns
-------
processed_images : pims.SliceableIterator
Example
-------
Apply the pipeline decorator to your image processing function.
>>> @pipeline
... def color_channel(image, channel):
... return image[channel, :, :]
...
Load images with PIMS.
>>> images = pims.ImageSequence(...)
Passing the PIMS class to the function return another PIMS object
that "lazily" applies the function when the images come out. Different
functions can be applied to the same underlying images, creating
independent objects.
>>> red_images = color_channel(images, 0)
>>> green_images = color_channel(images, 1)
Pipeline functions can also be composed.
>>> @pipeline
... def rescale(image):
... return (image - image.min())/image.ptp()
...
>>> rescale(color_channel(images, 0))
The function can still be applied to ordinary images. The decorator
only takes affect when a PIMS object is passed.
>>> single_img = images[0]
>>> red_img = red_channel(single_img) # normal behavior
"""
@wraps(func)
def process(img_or_iterable, *args, **kwargs):
if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):
_len = len(img_or_iterable)
s = SliceableIterable(img_or_iterable, range(_len), _len)
s._proc_func = lambda image: func(image, *args, **kwargs)
return s
else:
# Fall back on normal behavior of func, interpreting input
# as a single image.
return func(img_or_iterable)
if process.__doc__ is None:
process.__doc__ = ''
process.__doc__ = ("This function has been made pims-aware. When passed\n"
"a pims reader or SliceableIterable, it will return a \n"
"new SliceableIterable of the results. When passed \n"
"other objects, its behavior is "
"unchanged.\n\n") + process.__doc__
return process
class FramesSequenceND(FramesSequence):
""" A base class defining a FramesSequence with an arbitrary number of
axes. In the context of this reader base class, dimensions like 'x', 'y',
't' and 'z' will be called axes. Indices along these axes will be called
coordinates.
The properties `bundle_axes`, `iter_axes`, and `default_coords` define
to which coordinates each index points. See below for a description of
each attribute.
Subclassed readers only need to define `get_frame_2D`, `pixel_type` and
`__init__`. In the `__init__`, at least axes y and x need to be
initialized using `_init_axis(name, size)`.
The attributes `__len__`, `frame_shape`, and `get_frame` are defined by
this base_class; these are not meant to be changed.
Attributes
----------
axes : list of strings
List of all available axes
ndim : int
Number of image axes
sizes : dict of int
Dictionary with all axis sizes
frame_shape : tuple of int
Shape of frames that will be returned by get_frame
iter_axes : iterable of strings
This determines which axes will be iterated over by the FramesSequence.
The last element in will iterate fastest. x and y are not allowed.
bundle_axes : iterable of strings
This determines which axes will be bundled into one Frame. The axes in
the ndarray that is returned by get_frame have the same order as the
order in this list. The last two elements have to be ['y', 'x'].
default_coords: dict of int
When a dimension is not present in both iter_axes and bundle_axes, the
coordinate contained in this dictionary will be used.
Examples
--------
>>> class MDummy(FramesSequenceND):
... @property
... def pixel_type(self):
... return 'uint8'
... def __init__(self, shape, **axes):
... self._init_axis('y', shape[0])
... self._init_axis('x', shape[1])
... for name in axes:
... self._init_axis(name, axes[name])
... def get_frame_2D(self, **ind):
... return np.zeros((self.sizes['y'], self.sizes['x']),
... dtype=self.pixel_type)
>>> frames = MDummy((64, 64), t=80, c=2, z=10, m=5)
>>> frames.bundle_axes = 'czyx'
>>> frames.iter_axes = 't'
>>> frames.default_coords['m'] = 3
>>> frames[5] # returns Frame at T=5, M=3 with shape (2, 10, 64, 64)
"""
def _clear_axes(self):
self._sizes = {}
self._default_coords = {}
self._iter_axes = []
self._bundle_axes = ['y', 'x']
def _init_axis(self, name, size, default=0):
# check if the axes have been initialized, if not, do it here
if not hasattr(self, '_sizes'):
self._clear_axes()
elif name in self._sizes:
raise ValueError("dimension '{}' already exists".format(name))
self._sizes[name] = int(size)
if not (name == 'x' or name == 'y'):
self.default_coords[name] = int(default)
def __len__(self):
return int(np.prod([self._sizes[d] for d in self._iter_axes]))
@property
def frame_shape(self):
""" Returns the shape of the frame as returned by get_frame. """
return tuple([self._sizes[d] for d in self._bundle_axes])
@property
def axes(self):
""" Returns a list of all axes. """
return [k for k in self._sizes]
@property
def ndim(self):
""" Returns the number of axes. """
return len(self._sizes)
@property
def sizes(self):
""" Returns a dict of all axis sizes. """
return self._sizes
@property
def bundle_axes(self):
""" This determines which dimensions will be bundled into one Frame.
The ndarray that is returned by get_frame has the same dimension order
as the order of `bundle_axes`.
The last two elements have to be ['y', 'x'].
"""
return self._bundle_axes
@bundle_axes.setter
def bundle_axes(self, value):
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
if len(value) < 2 or not (value[-1] == 'x' and value[-2] == 'y'):
raise ValueError("bundle_axes should end with ['y', 'x']")
for k in value:
if k in self._iter_axes:
del self._iter_axes[self._iter_axes.index(k)]
self._bundle_axes = list(value)
@property
def iter_axes(self):
""" This determines which axes will be iterated over by the
FramesSequence. The last element will iterate fastest.
x and y are not allowed. """
return self._iter_axes
@iter_axes.setter
def iter_axes(self, value):
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
if 'x' in value or 'y' in value:
raise ValueError("axes 'y' and 'x' cannot be iterated")
for k in value:
if k in self._bundle_axes:
del self._bundle_axes[self._bundle_axes.index(k)]
self._iter_axes = list(value)
@property
def default_coords(self):
""" When a axis is not present in both iter_axes and bundle_axes, the
coordinate contained in this dictionary will be used. """
return self._default_coords
@default_coords.setter
def default_coords(self, value):
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
self._default_coords.update(**value)
@abstractmethod
def get_frame_2D(self, **ind):
""" The actual frame reader, defined by the subclassed reader.
This method should take exactly one keyword argument per axis,
reflecting the coordinate along each axis. It returns a two dimensional
ndarray with shape (sizes['y'], sizes['x']) and dtype `pixel_type`. It
may also return a Frame object, so that metadata will be propagated. It
will only propagate metadata if every bundled frame gives the same
fields.
"""
pass
def get_frame(self, i):
""" Returns a Frame of shape deterimend by bundle_axes. The index value
is interpreted according to the iter_axes property. Coordinates not
present in both iter_axes and bundle_axes will be set to their default
value (see default_coords). """
if i > len(self):
raise IndexError('index out of range')
# start with the default coordinates
coords = self._default_coords.copy()
# list sizes of iterate dimensions
iter_sizes = [self._sizes[k] for k in self._iter_axes]
# list how much i has to increase to get an increase of coordinate n
iter_cumsizes = np.append(np.cumprod(iter_sizes[::-1])[-2::-1], 1)
# calculate the coordinates and update the coords dictionary
iter_coords = (i // iter_cumsizes) % iter_sizes
coords.update(**{k: v for k, v in zip(self._iter_axes, iter_coords)})
shape = self.frame_shape
if len(shape) == 2: # simple case of only one frame
result = self.get_frame_2D(**coords)
if hasattr(result, 'metadata'):
metadata = result.metadata
else:
metadata = None
else: # general case of N dimensional frame
Nframes = int(np.prod(shape[:-2]))
result = np.empty([Nframes] + list(shape[-2:]),
dtype=self.pixel_type)
# read all 2D frames and properly iterate through the coordinates
mdlist = [{}] * Nframes
for n in range(Nframes):
frame = self.get_frame_2D(**coords)
result[n] = frame
if hasattr(frame, 'metadata'):
mdlist[n] = frame.metadata
for dim in self._bundle_axes[-3::-1]:
coords[dim] += 1
if coords[dim] >= self._sizes[dim]:
coords[dim] = 0
else:
break
# reshape the array into the desired shape
result.shape = shape
# propagate metadata
metadata = {}
if not np.all([md == {} for md in mdlist]):
keys = mdlist[0].keys()
for k in keys:
try:
metadata[k] = [row[k] for row in mdlist]
except KeyError:
# if a field is not present in every frame, ignore it
warn('metadata field {} is not propagated')
else:
# if all values are equal, only return one value
if metadata[k][1:] == metadata[k][:-1]:
metadata[k] = metadata[k][0]
else: # cast into ndarray
metadata[k] = np.array(metadata[k])
metadata[k].shape = shape[:-2]
return Frame(result, frame_no=i, metadata=metadata)
def __repr__(self):
s = "<FramesSequenceND>\nDimensions: {0}\n".format(self.ndim)
for dim in self._sizes:
s += "Dimension '{0}' size: {1}\n".format(dim, self._sizes[dim])
s += """Pixel Datatype: {dtype}""".format(dtype=self.pixel_type)
return s
| [
"itertools.repeat",
"numpy.cumprod",
"six.moves.range",
"numpy.all",
"threading.Lock",
"numpy.array",
"functools.wraps",
"numpy.rollaxis",
"itertools.tee",
"warnings.warn",
"numpy.prod",
"six.with_metaclass"
] | [((465, 496), 'six.with_metaclass', 'with_metaclass', (['ABCMeta', 'object'], {}), '(ABCMeta, object)\n', (479, 496), False, 'from six import with_metaclass\n'), ((19188, 19199), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (19193, 19199), False, 'from functools import wraps\n'), ((12477, 12483), 'threading.Lock', 'Lock', ([], {}), '()\n', (12481, 12483), False, 'from threading import Lock\n'), ((7752, 7772), 'functools.wraps', 'functools.wraps', (['att'], {}), '(att)\n', (7767, 7772), False, 'import functools\n'), ((8293, 8317), 'six.moves.range', 'range', (['start', 'stop', 'step'], {}), '(start, stop, step)\n', (8298, 8317), False, 'from six.moves import range\n'), ((23246, 23296), 'numpy.prod', 'np.prod', (['[self._sizes[d] for d in self._iter_axes]'], {}), '([self._sizes[d] for d in self._iter_axes])\n', (23253, 23296), True, 'import numpy as np\n'), ((27985, 27999), 'six.moves.range', 'range', (['Nframes'], {}), '(Nframes)\n', (27990, 27999), False, 'from six.moves import range\n'), ((19419, 19430), 'six.moves.range', 'range', (['_len'], {}), '(_len)\n', (19424, 19430), False, 'from six.moves import range\n'), ((27111, 27139), 'numpy.cumprod', 'np.cumprod', (['iter_sizes[::-1]'], {}), '(iter_sizes[::-1])\n', (27121, 27139), True, 'import numpy as np\n'), ((27715, 27734), 'numpy.prod', 'np.prod', (['shape[:-2]'], {}), '(shape[:-2])\n', (27722, 27734), True, 'import numpy as np\n'), ((28591, 28628), 'numpy.all', 'np.all', (['[(md == {}) for md in mdlist]'], {}), '([(md == {}) for md in mdlist])\n', (28597, 28628), True, 'import numpy as np\n'), ((3696, 3727), 'numpy.rollaxis', 'np.rollaxis', (['img', 'color_axis', '(3)'], {}), '(img, color_axis, 3)\n', (3707, 3727), True, 'import numpy as np\n'), ((10235, 10263), 'itertools.tee', 'itertools.tee', (['self._indices'], {}), '(self._indices)\n', (10248, 10263), False, 'import itertools\n'), ((15435, 15457), 'itertools.repeat', 'itertools.repeat', (['(True)'], {}), '(True)\n', (15451, 15457), False, 
'import itertools\n'), ((10296, 10310), 'six.moves.range', 'range', (['(key + 1)'], {}), '(key + 1)\n', (10301, 10310), False, 'from six.moves import range\n'), ((28928, 28971), 'warnings.warn', 'warn', (['"""metadata field {} is not propagated"""'], {}), "('metadata field {} is not propagated')\n", (28932, 28971), False, 'from warnings import warn\n'), ((29285, 29306), 'numpy.array', 'np.array', (['metadata[k]'], {}), '(metadata[k])\n', (29293, 29306), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Utility functions for reading and writing tables
"""
import os
import copy
import numpy as np
import sqlite3
from sqlalchemy import create_engine
from astropy.io import fits
import pandas as pd
__all__ = ['to_csv', 'export_db']
def to_csv(truth_db_path, dest_dir='.', table_suffix=''):
"""Dumps sqlite3 files of the truth tables as csv files
Parameters
----------
truth_db_path : str
Path to the truth tables Bryce made, either of the lens or the host
"""
db = sqlite3.connect(truth_db_path)
cursor = db.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
for table_name in tables:
table_name = table_name[0]
table = pd.read_sql_query("SELECT * from %s" % table_name, db)
table.to_csv(os.path.join(dest_dir, table_name + '{:s}.csv'.format(table_suffix)), index=False)
cursor.close()
db.close()
def export_db(dataframe, out_dir, out_fname, table_name, overwrite=False):
"""Export a DB from a Pandas DataFrame
Parameters
----------
dataframe : Pandas.DataFrame object
out_fname : str
table_name : str
overwrite_existing : bool
"""
out_path = os.path.join(out_dir, out_fname)
if overwrite is True and os.path.exists(out_path):
os.remove(out_path)
engine = create_engine('sqlite:///{:s}'.format(out_path), echo=False)
dataframe.to_sql(table_name, con=engine, index=False)
engine.dispose()
return None
def boundary_max(data):
"""Get the maximum pixel value along the four boundaries of the given image
"""
ny, nx = data.shape
boundary = np.concatenate((data[:, 0], data[:, -1], data[0, :],
data[-1, :]))
return np.max(boundary)
def write_fits_stamp(data, magnorms, lens_id, galaxy_type, pixel_scale, outfile, overwrite=True,
underflow_frac=1e-12):
"""Write the given image as a fits stamp with relevant metadata
Parameters
----------
data : np.array
the image to export
magnorms : dict
the normalizing magnitude with ugrizy keys
lens_id : int
the dc2_sys_id for this lensing system
galaxy_type : str
the galaxy component type ('bulge' or 'disk')
pixel_scale : float
outfile : output file path
overwrite : bool
underflow_frac: float [1e-12]
Set pixels to zero when they have values < underflow_frac*np.sum(data)
"""
boundary_ratio = boundary_max(data)/np.max(data)
if boundary_ratio > 1e-2:
print(f'(boundary max/data max) = {boundary_ratio:.2e} '
f'for {galaxy_type} {lens_id}')
for magnorm in magnorms.values():
if not np.isfinite(magnorm):
raise RuntimeError(f'non-finite magnorm for {lens_id}')
os.makedirs(os.path.dirname(os.path.abspath(outfile)), exist_ok=True)
output = fits.HDUList(fits.PrimaryHDU())
output[0].data = copy.deepcopy(data)
output[0].data[data < underflow_frac*np.sum(data)] = 0
output[0].header.set('LENS_ID', lens_id, 'Lens system ID')
output[0].header.set('GALTYPE', galaxy_type, 'Galaxy component type')
for band, magnorm in magnorms.items():
output[0].header.set(f'MAGNORM{band.upper()}', magnorm,
f'magnorm for {band}-band')
output[0].header.set('PIXSCALE', pixel_scale, 'pixel scale in arcseconds')
output.writeto(outfile, overwrite=overwrite)
| [
"copy.deepcopy",
"os.remove",
"os.path.abspath",
"numpy.sum",
"astropy.io.fits.PrimaryHDU",
"os.path.exists",
"numpy.isfinite",
"numpy.max",
"sqlite3.connect",
"pandas.read_sql_query",
"os.path.join",
"numpy.concatenate"
] | [((526, 556), 'sqlite3.connect', 'sqlite3.connect', (['truth_db_path'], {}), '(truth_db_path)\n', (541, 556), False, 'import sqlite3\n'), ((1245, 1277), 'os.path.join', 'os.path.join', (['out_dir', 'out_fname'], {}), '(out_dir, out_fname)\n', (1257, 1277), False, 'import os\n'), ((1683, 1749), 'numpy.concatenate', 'np.concatenate', (['(data[:, 0], data[:, -1], data[0, :], data[-1, :])'], {}), '((data[:, 0], data[:, -1], data[0, :], data[-1, :]))\n', (1697, 1749), True, 'import numpy as np\n'), ((1792, 1808), 'numpy.max', 'np.max', (['boundary'], {}), '(boundary)\n', (1798, 1808), True, 'import numpy as np\n'), ((2986, 3005), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2999, 3005), False, 'import copy\n'), ((767, 821), 'pandas.read_sql_query', 'pd.read_sql_query', (["('SELECT * from %s' % table_name)", 'db'], {}), "('SELECT * from %s' % table_name, db)\n", (784, 821), True, 'import pandas as pd\n'), ((1307, 1331), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (1321, 1331), False, 'import os\n'), ((1341, 1360), 'os.remove', 'os.remove', (['out_path'], {}), '(out_path)\n', (1350, 1360), False, 'import os\n'), ((2549, 2561), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2555, 2561), True, 'import numpy as np\n'), ((2946, 2963), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (2961, 2963), False, 'from astropy.io import fits\n'), ((2756, 2776), 'numpy.isfinite', 'np.isfinite', (['magnorm'], {}), '(magnorm)\n', (2767, 2776), True, 'import numpy as np\n'), ((2878, 2902), 'os.path.abspath', 'os.path.abspath', (['outfile'], {}), '(outfile)\n', (2893, 2902), False, 'import os\n'), ((3047, 3059), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (3053, 3059), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import time
import InstrumentDriver
import numpy as np
import sys
from pathlib import Path
sys.path.append(str((Path(__file__).parents[1]/'QuantumCTek'/'lib').resolve()))
import device_interface
import write_configuration as CONST
dev = device_interface.DeviceInterface()
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements the QuantumCTek AWG driver"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
import json
with open(Path(__file__).parent/'offset_config.json', 'r') as f:
offset_dict = json.load(f)
offsets = offset_dict[self.comCfg.address]
self.default_offset = [offsets[2], offsets[3], offsets[0], offsets[1]]
ret = 0
ret |= dev.da_connect_device(self.comCfg.name, self.comCfg.address, None, offsets)
ret |= dev.da_init_device(self.comCfg.name)
if ret != 0:
raise Exception(f'da board:[{self.comCfg.name}] connect or init failure, ret:[{ret}]')
self.initSetConfig()
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
_ = dev.da_disconnect_device(self.comCfg.name)
def initSetConfig(self):
"""This function is run before setting values in Set Config"""
self.waveform = [None] * CONST.MAX_CHANNELS
self.update_waveform = [False] * CONST.MAX_CHANNELS
self.output = [False] * CONST.MAX_CHANNELS
# stop all outputs
self._setValue(dev.da_stop_output_wave, self.comCfg.name, 0)
def _setValue(self, _func, *args):
"""Call API and check returned value"""
ret = _func(*args)
if ret != 0:
raise Exception(f'Error when calling {_func}: da board:[{self.comCfg.name}], ret:[{ret}]')
def _rescale(self, n, waveform):
"""Rescale and clip waveform for channel n, raise an error if clipping is not allowed."""
offset1 = self.default_offset[n-1]
offset2 = int(self.getValue(f'Data offset #{n}'))
code_max = 32768 - offset1 - offset2
code_min = -32767 - offset1 - offset2
scaled_v = np.array(np.round(32767*waveform), dtype=int)
if not self.getValue(f'Allow clipping #{n}'):
if np.any(scaled_v > code_max) or np.any(scaled_v < code_min):
raise Exception(f'Waveform #{n} overflows. Input range is {code_min/32767:f} -- {code_max/32767:f}.')
else:
scaled_v = np.clip(scaled_v, code_min, code_max)
return scaled_v
def _upload_waveform(self):
"""Upload waveform and enable output"""
for i in range(CONST.MAX_CHANNELS):
if self.update_waveform[i] and self.output[i] and (self.waveform[i] is not None):
self._setValue(dev.da_stop_output_wave, self.comCfg.name, f'Z{i+1}')
scaled_v = self._rescale(i+1, self.waveform[i])
mode = 1 if self.getValue(f'Continuous output #{i+1}') else 0
self._setValue(dev.da_write_wave, scaled_v, self.comCfg.name, f'Z{i+1}', 'i', mode, 0)
self._setValue(dev.da_start_output_wave, self.comCfg.name, f'Z{i+1}')
self.update_waveform[i] = False
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# if self.isFirstCall(options):
# # if not self.isHardwareTrig(options):
# # # single board mode
# # self._setValue(dev.set_multi_board, self.comCfg.name, 0)
# # else:
# # multi board mode
# # self._setValue(dev.set_multi_board, self.comCfg.name, 0)
if quant.name == 'Trigger delay':
self._setValue(dev.da_set_trigger_delay, self.comCfg.name, value)
elif quant.name == 'Output delay':
self._setValue(dev.da_set_da_output_delay, self.comCfg.name, value)
elif quant.name == 'Trigger interval':
self._setValue(dev.da_set_trigger_interval_l1, self.comCfg.name, value)
elif quant.name == 'Trigger count':
value = int(round(value))
self._setValue(dev.da_set_trigger_count_l1, self.comCfg.name, value)
elif quant.name == 'Run AWG':
if value:
interval = self.getValue('Trigger interval')
n = self.getValue('Trigger count')
self._setValue(dev.da_trigger_enable, self.comCfg.name)
# Wait output before receiving data
time.sleep(n*interval + 0.001)
# else:
# # stop all channel
# self._setValue(dev.da_stop_output_wave, self.comCfg.name, 0)
elif quant.name.startswith('Channel gain'):
value = int(round(value))
n = int(quant.name[-1])
self._setValue(dev.da_set_channel_gain, self.comCfg.name, f'Z{n}', value)
elif quant.name.startswith('Output'):
n = int(quant.name[-1])
self.output[n-1] = value
if value:
# Need to upload and start output
self.update_waveform[n-1] = True
else:
self._setValue(dev.da_stop_output_wave, self.comCfg.name, f'Z{n}')
elif quant.name.startswith('Data offset'):
value = int(round(value))
n = int(quant.name[-1])
offset1 = self.default_offset[n-1]
code_max = 32768 - offset1
code_min = -32767 - offset1
if value > code_max or value < code_min:
raise Exception(f'{quant.name} overflows. Input range is {code_min} -- {code_max}.')
# self._setValue(dev.da_set_data_offset, self.comCfg.name, f'Z{n}', int(round(value*32768)))
self._setValue(dev.da_set_channel_default_voltage, self.comCfg.name, f'Z{n}', 32768-value)
if self.waveform[n-1] is not None:
self.update_waveform[n-1] = True
elif quant.name.startswith('Waveform'):
n = int(quant.name[-1])
if ((self.waveform[n-1] is None) or
(len(self.waveform[n-1]) != len(value['y'])) or
np.any(self.waveform[n-1] != value['y'])):
self.waveform[n-1] = value['y']
self.update_waveform[n-1] = True
elif quant.name.startswith('Continuous output'):
n = int(quant.name[-1])
if self.waveform[n-1] is not None:
self.update_waveform[n-1] = True
if self.isFinalCall(options):
if np.any(self.update_waveform):
self._upload_waveform()
# run awg if not hardware triggered
# if not self.isHardwareTrig(options):
# self._setValue(dev.da_trigger_enable, self.comCfg.name)
return value
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
value = quant.getValue()
return value
if __name__ == '__main__':
pass
| [
"json.load",
"numpy.clip",
"time.sleep",
"numpy.any",
"device_interface.DeviceInterface",
"pathlib.Path",
"numpy.round"
] | [((264, 298), 'device_interface.DeviceInterface', 'device_interface.DeviceInterface', ([], {}), '()\n', (296, 298), False, 'import device_interface\n'), ((641, 653), 'json.load', 'json.load', (['f'], {}), '(f)\n', (650, 653), False, 'import json\n'), ((2222, 2248), 'numpy.round', 'np.round', (['(32767 * waveform)'], {}), '(32767 * waveform)\n', (2230, 2248), True, 'import numpy as np\n'), ((2543, 2580), 'numpy.clip', 'np.clip', (['scaled_v', 'code_min', 'code_max'], {}), '(scaled_v, code_min, code_max)\n', (2550, 2580), True, 'import numpy as np\n'), ((6712, 6740), 'numpy.any', 'np.any', (['self.update_waveform'], {}), '(self.update_waveform)\n', (6718, 6740), True, 'import numpy as np\n'), ((2328, 2355), 'numpy.any', 'np.any', (['(scaled_v > code_max)'], {}), '(scaled_v > code_max)\n', (2334, 2355), True, 'import numpy as np\n'), ((2359, 2386), 'numpy.any', 'np.any', (['(scaled_v < code_min)'], {}), '(scaled_v < code_min)\n', (2365, 2386), True, 'import numpy as np\n'), ((560, 574), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (564, 574), False, 'from pathlib import Path\n'), ((136, 150), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'from pathlib import Path\n'), ((4681, 4713), 'time.sleep', 'time.sleep', (['(n * interval + 0.001)'], {}), '(n * interval + 0.001)\n', (4691, 4713), False, 'import time\n'), ((6322, 6364), 'numpy.any', 'np.any', (["(self.waveform[n - 1] != value['y'])"], {}), "(self.waveform[n - 1] != value['y'])\n", (6328, 6364), True, 'import numpy as np\n')] |
# Plot the ensemble-mean composite reflectivity for one SREF forecast hour
# and save it as a PNG for the wxenviz.github.io front end.
import ftplib
import glob
import subprocess as sp
import csv
import numpy as np
import netCDF4 as nc4
import pygrib as pg
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # headless rendering: pick a non-interactive backend before plotting
import datetime
import scipy
import os
import sys
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.colors import LinearSegmentedColormap
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator
from shutil import copyfile
# Forecast hour is the single command-line argument.
forecasthoursub = str(sys.argv[1])
# Build a reflectivity colormap (0-60 dBZ) from the N0Q color lookup table.
levels = []
colors = []
with open('/gpfs_backup/stormtrack/jtradfor/ensemble_data/reference/N0Q_Color_Lookup.csv','r') as colorcsv:
    colorreader = csv.reader(colorcsv,delimiter=',')
    for line in colorreader:
        if float(line[1])>=0 and float(line[1])<=60:
            colorints = [int(i) for i in line[2:]]
            colors.append((colorints))
            levels.append(float(line[1]))
colors = np.array(colors)/255.0  # 8-bit RGB values -> matplotlib 0-1 floats
cmap1 = LinearSegmentedColormap.from_list("my_colormap",colors,N=len(levels),gamma=1.0)
# Lambert conformal map over the CONUS with state outlines from a shapefile.
plt.figure(figsize=(16,9))
m = Basemap(projection='lcc',lat_0=5,lon_0=-100,llcrnrlon=-126,llcrnrlat=23,urcrnrlon=-63,urcrnrlat=50,resolution='h')
shp_info = m.readshapefile('/gpfs_backup/stormtrack/jtradfor/ensemble_data/reference/st99_d00','states',drawbounds=False)
ax = plt.gca()
for nshape,seg in enumerate(m.states):
    # White fill below the data (zorder=1) and black outline above it (zorder=3).
    poly = Polygon(seg,facecolor='white',edgecolor='white',zorder=1,linewidth=1)
    poly2 = Polygon(seg,facecolor='none',edgecolor='black',zorder=3,linewidth=1)
    ax.add_patch(poly)
    ax.add_patch(poly2)
# Load the per-member composite reflectivity stack and the domain mask.
reflectivities = np.load('/gpfs_backup/stormtrack/jtradfor/ensemble_data/rawdata/sref/%s_creflect.npy' % (forecasthoursub))
reflectivities_mask = np.load('/gpfs_backup/stormtrack/jtradfor/ensemble_data/reference/sref_arw_mask.npy')
reflectivities_copy = []
for reflectivity in reflectivities:
    # NOTE: these assignments also modify the arrays in `reflectivities` in place.
    reflectivity[reflectivities_mask] = np.nan
    reflectivity[reflectivity<0] = 0.0
    reflectivities_copy.append(reflectivity)
# Ensemble mean; hide non-positive / fill values and cap the scale at 60 dBZ.
reflect_mean = np.mean(reflectivities_copy,axis=0)
reflect_mean[reflect_mean<=0] = np.nan
reflect_mean[reflect_mean>1000000] = np.nan
reflect_mean[reflect_mean>60] = 60.0
im = m.imshow(reflect_mean,zorder=2,aspect='equal',interpolation='none',cmap=cmap1,vmin=0,vmax=60.0)
cbar = plt.colorbar(im,fraction=0.023,ticks=[0,10,20,30,40,50,60])
cbar.ax.yaxis.set_tick_params(color='w')
cbar.ax.set_yticklabels([0,10,20,30,40,50,60],color='w')
plt.box(False)
# Write the final image where the web front end expects it.
meanfil = '/gpfs_backup/stormtrack/jtradfor/ensemble_data/wxenviz.github.io/uploads/outimages/sref/%s_R_mean.png' % (forecasthoursub)
plt.savefig(meanfil,facecolor='#101010',bbox_inches='tight',dpi=500)
plt.close()
| [
"matplotlib.pyplot.switch_backend",
"numpy.load",
"csv.reader",
"matplotlib.pyplot.close",
"matplotlib.pyplot.box",
"matplotlib.pyplot.colorbar",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.gca",
"mpl_toolkits.basemap.Basemap",
"m... | [((154, 179), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (172, 179), True, 'import matplotlib.pyplot as plt\n'), ((1008, 1035), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1018, 1035), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1165), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""lcc"""', 'lat_0': '(5)', 'lon_0': '(-100)', 'llcrnrlon': '(-126)', 'llcrnrlat': '(23)', 'urcrnrlon': '(-63)', 'urcrnrlat': '(50)', 'resolution': '"""h"""'}), "(projection='lcc', lat_0=5, lon_0=-100, llcrnrlon=-126, llcrnrlat=23,\n urcrnrlon=-63, urcrnrlat=50, resolution='h')\n", (1047, 1165), False, 'from mpl_toolkits.basemap import Basemap\n'), ((1283, 1292), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1290, 1292), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1662), 'numpy.load', 'np.load', (["('/gpfs_backup/stormtrack/jtradfor/ensemble_data/rawdata/sref/%s_creflect.npy'\n % forecasthoursub)"], {}), "(\n '/gpfs_backup/stormtrack/jtradfor/ensemble_data/rawdata/sref/%s_creflect.npy'\n % forecasthoursub)\n", (1555, 1662), True, 'import numpy as np\n'), ((1677, 1772), 'numpy.load', 'np.load', (['"""/gpfs_backup/stormtrack/jtradfor/ensemble_data/reference/sref_arw_mask.npy"""'], {}), "(\n '/gpfs_backup/stormtrack/jtradfor/ensemble_data/reference/sref_arw_mask.npy'\n )\n", (1684, 1772), True, 'import numpy as np\n'), ((1962, 1998), 'numpy.mean', 'np.mean', (['reflectivities_copy'], {'axis': '(0)'}), '(reflectivities_copy, axis=0)\n', (1969, 1998), True, 'import numpy as np\n'), ((2227, 2294), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'fraction': '(0.023)', 'ticks': '[0, 10, 20, 30, 40, 50, 60]'}), '(im, fraction=0.023, ticks=[0, 10, 20, 30, 40, 50, 60])\n', (2239, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2399), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (2392, 2399), True, 'import 
matplotlib.pyplot as plt\n'), ((2535, 2606), 'matplotlib.pyplot.savefig', 'plt.savefig', (['meanfil'], {'facecolor': '"""#101010"""', 'bbox_inches': '"""tight"""', 'dpi': '(500)'}), "(meanfil, facecolor='#101010', bbox_inches='tight', dpi=500)\n", (2546, 2606), True, 'import matplotlib.pyplot as plt\n'), ((2604, 2615), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2613, 2615), True, 'import matplotlib.pyplot as plt\n'), ((667, 702), 'csv.reader', 'csv.reader', (['colorcsv'], {'delimiter': '""","""'}), "(colorcsv, delimiter=',')\n", (677, 702), False, 'import csv\n'), ((891, 907), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (899, 907), True, 'import numpy as np\n'), ((1341, 1414), 'matplotlib.patches.Polygon', 'Polygon', (['seg'], {'facecolor': '"""white"""', 'edgecolor': '"""white"""', 'zorder': '(1)', 'linewidth': '(1)'}), "(seg, facecolor='white', edgecolor='white', zorder=1, linewidth=1)\n", (1348, 1414), False, 'from matplotlib.patches import Polygon\n'), ((1420, 1492), 'matplotlib.patches.Polygon', 'Polygon', (['seg'], {'facecolor': '"""none"""', 'edgecolor': '"""black"""', 'zorder': '(3)', 'linewidth': '(1)'}), "(seg, facecolor='none', edgecolor='black', zorder=3, linewidth=1)\n", (1427, 1492), False, 'from matplotlib.patches import Polygon\n')] |
'''
A custom Keras layer to perform L2-normalization.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
class L2Normalization(Layer):
    """Keras layer that L2-normalizes its input along the channel axis and
    multiplies the result by a trainable per-channel scale ``gamma``.

    This is the normalization described in "ParseNet: Looking Wider to See
    Better" and used by the original SSD model.

    Arguments:
        gamma_init (int): Initial value for every element of the scale
            vector. Defaults to 20, following the SSD paper.

    Input shape:
        4D tensor: `(batch, channels, height, width)` for channels-first
        backends, `(batch, height, width, channels)` for channels-last.

    Returns:
        The normalized and re-scaled tensor, same shape as the input.
    """

    def __init__(self, gamma_init=20, **kwargs):
        # The channel axis depends on the backend's image data format.
        self.axis = 3 if K.image_data_format() == 'channels_last' else 1
        self.gamma_init = gamma_init
        super(L2Normalization, self).__init__(**kwargs)

    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        # One trainable scale per channel, initialized to gamma_init.
        n_channels = input_shape[self.axis]
        initial_scale = self.gamma_init * np.ones((n_channels,))
        self.gamma = K.variable(initial_scale, name='{}_gamma'.format(self.name))
        self._trainable_weights = [self.gamma]
        super(L2Normalization, self).build(input_shape)

    def call(self, x, mask=None):
        normalized = K.l2_normalize(x, self.axis)
        return normalized * self.gamma

    def get_config(self):
        config = super(L2Normalization, self).get_config()
        config.update(gamma_init=self.gamma_init)
        return config
| [
"keras.backend.l2_normalize",
"numpy.ones",
"keras.backend.image_data_format",
"keras.engine.topology.InputSpec"
] | [((2173, 2201), 'keras.backend.l2_normalize', 'K.l2_normalize', (['x', 'self.axis'], {}), '(x, self.axis)\n', (2187, 2201), True, 'import keras.backend as K\n'), ((1582, 1603), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1601, 1603), True, 'import keras.backend as K\n'), ((1845, 1873), 'keras.engine.topology.InputSpec', 'InputSpec', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1854, 1873), False, 'from keras.engine.topology import InputSpec\n'), ((1909, 1943), 'numpy.ones', 'np.ones', (['(input_shape[self.axis],)'], {}), '((input_shape[self.axis],))\n', (1916, 1943), True, 'import numpy as np\n')] |
from unittest import TestCase, TestLoader, TextTestRunner
import numpy as np
from bdpy.feature import normalize_feature
class TestUtilFeature(TestCase):
    """Tests for ``bdpy.feature.normalize_feature``.

    Each case computes the expected result with explicit NumPy operations
    and compares it against ``normalize_feature`` for every combination of
    channel-wise vs. whole-array mean/SD, including the 'self' shift/scale
    modes.
    """

    def test_normalize_feature_1d(self):
        """normalize_feature on a 1-D feature vector."""
        feat = np.random.rand(4096)
        feat_mean0 = np.random.rand(1, 1)
        feat_std0 = np.random.rand(1, 1)

        ddof = 1

        # For a 1-D array, channel-wise and whole-array statistics coincide
        # (both reduce over the entire array).
        feat_mean_ch = np.mean(feat, axis=None, keepdims=True)
        feat_mean_all = np.mean(feat, axis=None, keepdims=True)
        feat_std_ch = np.std(feat, axis=None, ddof=ddof, keepdims=True)
        feat_std_all = np.mean(np.std(feat, axis=None, ddof=ddof, keepdims=True), keepdims=True)

        # Mean (channel-wise) + SD (channel-wise)
        feat_valid = ((feat - feat_mean_ch) / feat_std_ch) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=True,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (channel-wise) + SD (all)
        feat_valid = ((feat - feat_mean_ch) / feat_std_all) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=False,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (all) + SD (channel-wise)
        feat_valid = ((feat - feat_mean_all) / feat_std_ch) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=False, channel_wise_std=True,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (all) + SD (all)
        feat_valid = ((feat - feat_mean_all) / feat_std_all) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=False, channel_wise_std=False,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (channel-wise) + SD (channel-wise), self-mean shift
        feat_valid = ((feat - feat_mean_ch) / feat_std_ch) * feat_std0 + feat_mean_ch
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=True,
                                      shift='self', scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (channel-wise) + SD (channel-wise), self-mean shift and self-SD scale
        feat_valid = ((feat - feat_mean_ch) / feat_std_ch) * feat_std_ch + feat_mean_ch
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=True,
                                      shift='self', scale='self',
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

    def test_normalize_feature_3d(self):
        """normalize_feature on a 3-D (channel, height, width) feature array."""
        feat = np.random.rand(64, 16, 16)
        feat_mean0 = np.random.rand(64, 1, 1)
        feat_std0 = np.random.rand(64, 1, 1)

        ddof = 1

        # Channel-wise statistics reduce over the spatial axes (1, 2).
        feat_mean_ch = np.mean(feat, axis=(1, 2), keepdims=True)
        feat_mean_all = np.mean(feat, axis=None, keepdims=True)
        feat_std_ch = np.std(feat, axis=(1, 2), ddof=ddof, keepdims=True)
        feat_std_all = np.mean(np.std(feat, axis=(1, 2), ddof=ddof, keepdims=True), keepdims=True)

        # Mean (channel-wise) + SD (channel-wise)
        feat_valid = ((feat - feat_mean_ch) / feat_std_ch) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=True,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (channel-wise) + SD (all)
        feat_valid = ((feat - feat_mean_ch) / feat_std_all) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=False,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (all) + SD (channel-wise)
        feat_valid = ((feat - feat_mean_all) / feat_std_ch) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=False, channel_wise_std=True,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (all) + SD (all)
        feat_valid = ((feat - feat_mean_all) / feat_std_all) * feat_std0 + feat_mean0
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=False, channel_wise_std=False,
                                      shift=feat_mean0, scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (channel-wise) + SD (channel-wise), self-mean shift
        feat_valid = ((feat - feat_mean_ch) / feat_std_ch) * feat_std0 + feat_mean_ch
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=True,
                                      shift='self', scale=feat_std0,
                                      std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)

        # Mean (channel-wise) + SD (channel-wise), self-mean shift and self-SD scale
        feat_valid = ((feat - feat_mean_ch) / feat_std_ch) * feat_std_ch + feat_mean_ch
        feat_test = normalize_feature(feat,
                                      channel_axis=0, channel_wise_mean=True, channel_wise_std=True,
                                      shift='self', scale='self',
                                      std_ddof=1)
        # Fixed: this case previously computed feat_test but never asserted it
        # (the matching 1-D case above does assert).
        np.testing.assert_array_equal(feat_test, feat_valid)

        # SD scaling only
        feat_valid = (feat / feat_std_all) * feat_std0
        feat_test = normalize_feature(feat,
                                      scaling_only=True,
                                      channel_wise_std=False,
                                      scale=feat_std0, std_ddof=1)
        np.testing.assert_array_equal(feat_test, feat_valid)
if __name__ == '__main__':
    # Run this module's test case with a verbose text runner.
    loader = TestLoader()
    runner = TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestUtilFeature))
| [
"unittest.TextTestRunner",
"numpy.std",
"numpy.testing.assert_array_equal",
"bdpy.feature.normalize_feature",
"numpy.mean",
"unittest.TestLoader",
"numpy.random.rand"
] | [((214, 234), 'numpy.random.rand', 'np.random.rand', (['(4096)'], {}), '(4096)\n', (228, 234), True, 'import numpy as np\n'), ((256, 276), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (270, 276), True, 'import numpy as np\n'), ((297, 317), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (311, 317), True, 'import numpy as np\n'), ((360, 399), 'numpy.mean', 'np.mean', (['feat'], {'axis': 'None', 'keepdims': '(True)'}), '(feat, axis=None, keepdims=True)\n', (367, 399), True, 'import numpy as np\n'), ((424, 463), 'numpy.mean', 'np.mean', (['feat'], {'axis': 'None', 'keepdims': '(True)'}), '(feat, axis=None, keepdims=True)\n', (431, 463), True, 'import numpy as np\n'), ((486, 535), 'numpy.std', 'np.std', (['feat'], {'axis': 'None', 'ddof': 'ddof', 'keepdims': '(True)'}), '(feat, axis=None, ddof=ddof, keepdims=True)\n', (492, 535), True, 'import numpy as np\n'), ((788, 925), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(True)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=True, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (805, 925), False, 'from bdpy.feature import normalize_feature\n'), ((1045, 1097), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (1074, 1097), True, 'import numpy as np\n'), ((1245, 1383), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(False)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=False, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (1262, 1383), False, 'from bdpy.feature import normalize_feature\n'), ((1503, 1555), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (1532, 1555), True, 'import numpy as np\n'), ((1703, 1841), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(False)', 'channel_wise_std': '(True)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=False,\n channel_wise_std=True, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (1720, 1841), False, 'from bdpy.feature import normalize_feature\n'), ((1961, 2013), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (1990, 2013), True, 'import numpy as np\n'), ((2153, 2292), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(False)', 'channel_wise_std': '(False)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=False,\n channel_wise_std=False, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (2170, 2292), False, 'from bdpy.feature import normalize_feature\n'), ((2412, 2464), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (2441, 2464), True, 'import numpy as np\n'), ((2639, 2772), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(True)', 'shift': '"""self"""', 'scale': 'feat_std0', 'std_ddof': '(1)'}), "(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=True, shift='self', scale=feat_std0, std_ddof=1)\n", (2656, 2772), False, 'from bdpy.feature import normalize_feature\n'), ((2892, 2944), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (2921, 
2944), True, 'import numpy as np\n'), ((3139, 3269), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(True)', 'shift': '"""self"""', 'scale': '"""self"""', 'std_ddof': '(1)'}), "(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=True, shift='self', scale='self', std_ddof=1)\n", (3156, 3269), False, 'from bdpy.feature import normalize_feature\n'), ((3389, 3441), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (3418, 3441), True, 'import numpy as np\n'), ((3499, 3525), 'numpy.random.rand', 'np.random.rand', (['(64)', '(16)', '(16)'], {}), '(64, 16, 16)\n', (3513, 3525), True, 'import numpy as np\n'), ((3547, 3571), 'numpy.random.rand', 'np.random.rand', (['(64)', '(1)', '(1)'], {}), '(64, 1, 1)\n', (3561, 3571), True, 'import numpy as np\n'), ((3592, 3616), 'numpy.random.rand', 'np.random.rand', (['(64)', '(1)', '(1)'], {}), '(64, 1, 1)\n', (3606, 3616), True, 'import numpy as np\n'), ((3659, 3700), 'numpy.mean', 'np.mean', (['feat'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(feat, axis=(1, 2), keepdims=True)\n', (3666, 3700), True, 'import numpy as np\n'), ((3725, 3764), 'numpy.mean', 'np.mean', (['feat'], {'axis': 'None', 'keepdims': '(True)'}), '(feat, axis=None, keepdims=True)\n', (3732, 3764), True, 'import numpy as np\n'), ((3787, 3838), 'numpy.std', 'np.std', (['feat'], {'axis': '(1, 2)', 'ddof': 'ddof', 'keepdims': '(True)'}), '(feat, axis=(1, 2), ddof=ddof, keepdims=True)\n', (3793, 3838), True, 'import numpy as np\n'), ((4122, 4259), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(True)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=True, shift=feat_mean0, scale=feat_std0, 
std_ddof=1)\n', (4139, 4259), False, 'from bdpy.feature import normalize_feature\n'), ((4379, 4431), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (4408, 4431), True, 'import numpy as np\n'), ((4579, 4717), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(False)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=False, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (4596, 4717), False, 'from bdpy.feature import normalize_feature\n'), ((4837, 4889), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (4866, 4889), True, 'import numpy as np\n'), ((5037, 5175), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(False)', 'channel_wise_std': '(True)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=False,\n channel_wise_std=True, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (5054, 5175), False, 'from bdpy.feature import normalize_feature\n'), ((5295, 5347), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (5324, 5347), True, 'import numpy as np\n'), ((5487, 5626), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(False)', 'channel_wise_std': '(False)', 'shift': 'feat_mean0', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, channel_axis=0, channel_wise_mean=False,\n channel_wise_std=False, shift=feat_mean0, scale=feat_std0, std_ddof=1)\n', (5504, 5626), False, 'from bdpy.feature import normalize_feature\n'), ((5746, 5798), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (5775, 5798), True, 'import numpy as np\n'), ((5973, 6106), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(True)', 'shift': '"""self"""', 'scale': 'feat_std0', 'std_ddof': '(1)'}), "(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=True, shift='self', scale=feat_std0, std_ddof=1)\n", (5990, 6106), False, 'from bdpy.feature import normalize_feature\n'), ((6226, 6278), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (6255, 6278), True, 'import numpy as np\n'), ((6473, 6603), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'channel_axis': '(0)', 'channel_wise_mean': '(True)', 'channel_wise_std': '(True)', 'shift': '"""self"""', 'scale': '"""self"""', 'std_ddof': '(1)'}), "(feat, channel_axis=0, channel_wise_mean=True,\n channel_wise_std=True, shift='self', scale='self', std_ddof=1)\n", (6490, 6603), False, 'from bdpy.feature import normalize_feature\n'), ((6816, 6916), 'bdpy.feature.normalize_feature', 'normalize_feature', (['feat'], {'scaling_only': '(True)', 'channel_wise_std': '(False)', 'scale': 'feat_std0', 'std_ddof': '(1)'}), '(feat, scaling_only=True, channel_wise_std=False, scale=\n feat_std0, std_ddof=1)\n', (6833, 6916), False, 'from bdpy.feature import normalize_feature\n'), ((7035, 7087), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['feat_test', 'feat_valid'], {}), '(feat_test, feat_valid)\n', (7064, 7087), True, 'import numpy as np\n'), ((567, 616), 'numpy.std', 'np.std', (['feat'], {'axis': 'None', 'ddof': 'ddof', 'keepdims': '(True)'}), '(feat, axis=None, ddof=ddof, keepdims=True)\n', (573, 616), True, 'import numpy as np\n'), ((3870, 3921), 'numpy.std', 'np.std', (['feat'], {'axis': 
'(1, 2)', 'ddof': 'ddof', 'keepdims': '(True)'}), '(feat, axis=(1, 2), ddof=ddof, keepdims=True)\n', (3876, 3921), True, 'import numpy as np\n'), ((7129, 7141), 'unittest.TestLoader', 'TestLoader', ([], {}), '()\n', (7139, 7141), False, 'from unittest import TestCase, TestLoader, TextTestRunner\n'), ((7185, 7212), 'unittest.TextTestRunner', 'TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (7199, 7212), False, 'from unittest import TestCase, TestLoader, TextTestRunner\n')] |
import numpy as np
from random import choice, random
import time
# start wall-clock timer for the whole run
start_time = time.time()
# --- algorithm parameters ---
population_size = 50         # number of drones in the population
dimension_size = 3           # search-space dimensionality: x, y, z
donors_number = 1            # how many best individuals act as plasma donors
receivers_number = 1         # how many worst individuals receive plasma
maximum_evaluations = 2500   # fitness-evaluation budget
bound = 200                  # half-width of the square search area
# other dependent parameters, no need to change
current_evaluation = population_size  # the initial population costs one evaluation each
x_upper_bound = bound
x_lower_bound = -bound
y_upper_bound = bound
y_lower_bound = -bound
z_upper_bound = bound
z_lower_bound = 0            # drones cannot go below ground level
# Building footprint/height [x, y, z]; uncomment one of b1-b6 to switch scenarios.
BUILDING = [20, 50, 200] #b1
# BUILDING = [20, 50, 250] #b2
# BUILDING = [20, 50, 300] #b3
# BUILDING = [10, 50, 250] #b4
# BUILDING = [30, 50, 250] #b5
# BUILDING = [50, 50, 250] #b6
# users location data file path
# NOTE(review): duplicate of the BUILDING assignment above (same value, so
# harmless here) -- consider removing one of the two.
BUILDING = [20, 50, 200] #b1
user_data = f"users/UserLocations_{BUILDING[0]}_{BUILDING[1]}_{BUILDING[2]}.dat"
Users_Locations = np.loadtxt(user_data) #u2
# ========================================================= #
# Functions #
# ========================================================= #
# generating the initial drones
def generate_drones():
    """Build the initial population: population_size random drone positions,
    none of which lies inside the building."""
    swarm = np.zeros((population_size, dimension_size))
    filled = 0
    while filled < population_size:
        candidate = generate_drone()
        if drone_is_inside(candidate):
            continue  # reject positions that collide with the building
        swarm[filled, :] = candidate
        filled += 1
    return swarm
# generating just one drone
def generate_drone():
    """Sample one drone position uniformly at random within the search bounds."""
    lows = (x_lower_bound, y_lower_bound, z_lower_bound)
    highs = (x_upper_bound, y_upper_bound, z_upper_bound)
    drone = np.zeros(dimension_size)
    for axis, (low, high) in enumerate(zip(lows, highs)):
        drone[axis] = low + random() * (high - low)
    return drone
# calculating fitness of all drones in population
def calculate_fitnesses(drones):
    """Evaluate the objective for every drone; returns a float array of
    one fitness value per individual."""
    return np.array([fitness(drones[k]) for k in range(population_size)])
# calculation fitness for one individual
def fitness(drone):
    """Total cost of one drone position: the per-user cost summed over all
    users in Users_Locations (lower is better)."""
    n_users = Users_Locations.shape[0]
    return sum(fitness_per_user(Users_Locations[k], drone) for k in range(n_users))
# calculating signal strength for one user in the building
def fitness_per_user(user, drone):
    """Cost (signal-strength-style penalty) between one user and the drone.

    Combines a log-distance term, an angle term based on the user/drone
    dot product, and a horizontal-distance term. Lower is better.
    """
    dotProduct = 1
    up = 0
    dp = 0
    # NOTE(review): d2D reduces to |drone x-coordinate| only; a true 2-D
    # ground distance would also involve y -- confirm this is intentional.
    d2D = np.sqrt(drone[0]**2)
    d3D = 0
    for i in range(dimension_size):
        d3D += (user[i] - drone[i])**2
        dotProduct += user[i] * drone[i]
        up += user[i]**2
        dp += drone[i]**2
    d3D = np.sqrt(d3D)
    # product of vector magnitudes, used to normalize the dot product
    mag_mul = np.sqrt(up) * np.sqrt(dp)
    # log-distance term + angle penalty + horizontal-distance penalty
    result = (20.0 * np.log10(d3D) + 20.0 * ( 0.301 ) + 32.4) + (14.0 + 15.0 * pow(1.0 - dotProduct/mag_mul, 2.0) ) + (0.5 * d2D)
    return result
# perform infection between two individuals
def perform_infection(x_k, x_m):
    """Infection step: perturb one random coordinate of x_k using x_m.

    Mutates ``x_k`` in place (callers pass a copy), then returns the
    bound-checked individual via check_bounds.
    """
    j = np.random.randint(0, dimension_size)
    x_k[j] += np.random.uniform(-1.0, 1.0) * (x_k[j] - x_m[j])
    return check_bounds(x_k)
# check if exceeded bounds
def check_bounds(drone):
    """Clamp the drone to the search box; if the clamped position falls
    inside the building, resample a fresh random position until it is
    outside. Mutates and/or replaces the given array; returns the result."""
    drone[0] = min(max(drone[0], x_lower_bound), x_upper_bound)
    drone[1] = min(max(drone[1], y_lower_bound), y_upper_bound)
    drone[2] = min(max(drone[2], z_lower_bound), z_upper_bound)
    while drone_is_inside(drone):
        drone = generate_drone()
    return drone
# get lists of indexes of doreceivers_numbers and recievers
def get_donors_and_receivers_indexes(fitnesses):
    """Return (donor_indexes, receiver_indexes).

    Donors are the best (lowest-fitness) individuals, receivers the worst
    (highest-fitness) ones, taken from an argsort of the fitness array.
    """
    order = np.argsort(fitnesses)
    donors = [order[i] for i in range(donors_number)]
    receivers = [order[-1 - i] for i in range(receivers_number)]
    return donors, receivers
# performing plasma tranfer from donor to receiver indvidual
def perform_plasma_transfer(receiver, donor):
    """Plasma-transfer step: perturb every coordinate of *receiver* relative
    to *donor*.

    Mutates ``receiver`` in place, then returns the bound-checked
    individual via check_bounds.
    """
    for j in range(dimension_size):
        receiver[j] += np.random.uniform(-1.0, 1.0) * (receiver[j] - donor[j])
    return check_bounds(receiver)
# updating donor's parameters
def update_donor(donor):
    """Donor-update step: randomly rescale every coordinate of *donor*
    (in place), then return the bound-checked individual."""
    for j in range(dimension_size):
        donor[j] += np.random.uniform(-1.0, 1.0) * donor[j]
    return check_bounds(donor)
# compare individual's fitness with global fitness value
def compare_with_best_fitness(x):
    """Re-evaluate individual *x* and update the global best_fitness if it
    improves on it; prints the new best value and its location.

    NOTE(review): the printed location is looked up in the global
    ``fitnesses`` array, which may not yet contain x's fitness at call
    time, so the location can lag the printed value -- verify.
    """
    global best_fitness
    x_fitness = fitness(x)
    if x_fitness < best_fitness:
        best_fitness = x_fitness
        # best_index = fitnesses.index(min(fitnesses))
        best_index = np.where(fitnesses == best_fitness)
        print(f"Best: {best_fitness} \t - location: {population[best_index]}")
# check if the drone is inside the building or not
def drone_is_inside(drone, building=None):
    """Return True if the drone position lies within the building's
    axis-aligned bounding box [0, building[axis]] on every axis.

    Parameters
    ----------
    drone : sequence of 3 floats
        Candidate (x, y, z) position.
    building : sequence of 3 floats, optional
        Building extents (x, y, z). Defaults to the module-level BUILDING,
        preserving the original behavior; passing it explicitly makes the
        function self-contained and testable.
    """
    if building is None:
        building = BUILDING
    return all(0 <= drone[axis] <= building[axis] for axis in range(3))
# ========================================================= #
#                    Start of IPA main loop                 #
# ========================================================= #
# generating initial population
population = generate_drones()
# calculating fitness of population
fitnesses = calculate_fitnesses(population)
# finding best individual fitness (minimization problem)
best_fitness = min(fitnesses)
# print("==> Population: ")
# print(population)
# print("==> Fitnesses: ")
# print(fitnesses)
# for i in range(population_size):
# print(f"Location: {population[i, :]} \t- Fitness: {fitnesses[i]} \t- is inside: {drone_is_inside(population[i])}")
print(f"Initial best fitness value: {best_fitness}")
print(f"Number of parameters: {dimension_size}")
print(f"Population size: {population_size}")
# Immune Plasma Algorithm: repeat infection / plasma-transfer / donor-update
# phases until the fitness-evaluation budget is spent.
while current_evaluation < maximum_evaluations:
    # start of infection phase: each individual is perturbed towards/away
    # from a randomly chosen other individual; keep the change if it improves.
    for index in range(population_size):
        if current_evaluation < maximum_evaluations:
            current_evaluation += 1
            random_index = np.random.randint(0, population_size)
            while random_index == index:
                random_index = np.random.randint(0, population_size)
            current_individual = population[index].copy()
            random_individual = population[random_index].copy()
            infected_individual = perform_infection(current_individual, random_individual)
            fitness_of_infected = fitness(infected_individual)
            if fitness_of_infected < fitnesses[index]:
                population[index] = infected_individual.copy()
                fitnesses[index] = fitness_of_infected
                compare_with_best_fitness(infected_individual)
        else:
            break # if exceeded maximum evaluation number
    # start of plasma transfering phase
    # generating dose_control and treatment_control vectors
    dose_control = np.ones(receivers_number, int)
    treatment_control = np.ones(receivers_number, int)
    # get indexes of both of donors and receivers
    donors_indexes, receivers_indexes = get_donors_and_receivers_indexes(fitnesses)
    for i in range(receivers_number):
        receiver_index = receivers_indexes[i]
        random_donor_index = donors_indexes[int(np.random.randint(0, donors_number))]
        current_receiver = population[receiver_index]
        random_donor = population[random_donor_index]
        # Keep treating the receiver until a treatment fails to help.
        while treatment_control[i] == 1:
            if current_evaluation < maximum_evaluations:
                current_evaluation += 1
                treated_individual = perform_plasma_transfer(current_receiver, random_donor)
                treated_fitness = fitness(treated_individual)
                if dose_control[i] == 1:
                    # first dose: compare against the donor's fitness
                    if treated_fitness < fitnesses[random_donor_index]:
                        dose_control[i] += 1
                        population[receiver_index] = treated_individual.copy()
                        fitnesses[receiver_index] = treated_fitness
                    else:
                        # treatment failed: receiver becomes a copy of the donor
                        population[receiver_index] = random_donor.copy()
                        fitnesses[receiver_index] = fitnesses[random_donor_index]
                        treatment_control[i] = 0
                else:
                    # subsequent doses: compare against the receiver's own fitness
                    if treated_fitness < fitnesses[receiver_index]:
                        population[receiver_index] = treated_individual.copy()
                        fitnesses[receiver_index] = treated_fitness
                    else:
                        treatment_control[i] = 0
                compare_with_best_fitness(population[receiver_index])
            else:
                break # if exceeded maximum evaluation number
    # start of donors updating phase: late in the run donors are refined,
    # early in the run they are more likely to be resampled from scratch.
    for i in range(donors_number):
        if current_evaluation < maximum_evaluations:
            current_evaluation += 1
            donor_index = donors_indexes[i]
            if (current_evaluation / maximum_evaluations) > random():
                # NOTE(review): fitnesses[donor_index] is not refreshed on this
                # branch, so it can go stale after update_donor -- verify.
                population[donor_index] = update_donor(population[donor_index])
            else:
                population[donor_index] = generate_drone()
                fitnesses[donor_index] = fitness(population[donor_index])
            compare_with_best_fitness(population[donor_index])
        else:
            break # if exceeded maximum evaluation number
# print elapsed time
end_time = time.time()
print(f"Elapsed time: {(end_time - start_time):.2f} seconds")
# print best fitness value in scientific notation
print(f"Best fitness value: {best_fitness:.6e}")
print(fitnesses)
| [
"numpy.random.uniform",
"numpy.zeros",
"numpy.ones",
"time.time",
"numpy.argsort",
"random.random",
"numpy.random.randint",
"numpy.where",
"numpy.loadtxt",
"numpy.log10",
"numpy.sqrt"
] | [((93, 104), 'time.time', 'time.time', ([], {}), '()\n', (102, 104), False, 'import time\n'), ((855, 876), 'numpy.loadtxt', 'np.loadtxt', (['user_data'], {}), '(user_data)\n', (865, 876), True, 'import numpy as np\n'), ((9417, 9428), 'time.time', 'time.time', ([], {}), '()\n', (9426, 9428), False, 'import time\n'), ((1145, 1188), 'numpy.zeros', 'np.zeros', (['(population_size, dimension_size)'], {}), '((population_size, dimension_size))\n', (1153, 1188), True, 'import numpy as np\n'), ((1455, 1479), 'numpy.zeros', 'np.zeros', (['dimension_size'], {}), '(dimension_size)\n', (1463, 1479), True, 'import numpy as np\n'), ((1819, 1844), 'numpy.zeros', 'np.zeros', (['population_size'], {}), '(population_size)\n', (1827, 1844), True, 'import numpy as np\n'), ((2311, 2333), 'numpy.sqrt', 'np.sqrt', (['(drone[0] ** 2)'], {}), '(drone[0] ** 2)\n', (2318, 2333), True, 'import numpy as np\n'), ((2527, 2539), 'numpy.sqrt', 'np.sqrt', (['d3D'], {}), '(d3D)\n', (2534, 2539), True, 'import numpy as np\n'), ((2815, 2851), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dimension_size'], {}), '(0, dimension_size)\n', (2832, 2851), True, 'import numpy as np\n'), ((3694, 3715), 'numpy.argsort', 'np.argsort', (['fitnesses'], {}), '(fitnesses)\n', (3704, 3715), True, 'import numpy as np\n'), ((6955, 6985), 'numpy.ones', 'np.ones', (['receivers_number', 'int'], {}), '(receivers_number, int)\n', (6962, 6985), True, 'import numpy as np\n'), ((7010, 7040), 'numpy.ones', 'np.ones', (['receivers_number', 'int'], {}), '(receivers_number, int)\n', (7017, 7040), True, 'import numpy as np\n'), ((2554, 2565), 'numpy.sqrt', 'np.sqrt', (['up'], {}), '(up)\n', (2561, 2565), True, 'import numpy as np\n'), ((2568, 2579), 'numpy.sqrt', 'np.sqrt', (['dp'], {}), '(dp)\n', (2575, 2579), True, 'import numpy as np\n'), ((2866, 2894), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (2883, 2894), True, 'import numpy as np\n'), ((4633, 4668), 'numpy.where', 
'np.where', (['(fitnesses == best_fitness)'], {}), '(fitnesses == best_fitness)\n', (4641, 4668), True, 'import numpy as np\n'), ((1511, 1519), 'random.random', 'random', ([], {}), '()\n', (1517, 1519), False, 'from random import choice, random\n'), ((1585, 1593), 'random.random', 'random', ([], {}), '()\n', (1591, 1593), False, 'from random import choice, random\n'), ((1659, 1667), 'random.random', 'random', ([], {}), '()\n', (1665, 1667), False, 'from random import choice, random\n'), ((4075, 4103), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (4092, 4103), True, 'import numpy as np\n'), ((4277, 4305), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (4294, 4305), True, 'import numpy as np\n'), ((6099, 6136), 'numpy.random.randint', 'np.random.randint', (['(0)', 'population_size'], {}), '(0, population_size)\n', (6116, 6136), True, 'import numpy as np\n'), ((6209, 6246), 'numpy.random.randint', 'np.random.randint', (['(0)', 'population_size'], {}), '(0, population_size)\n', (6226, 6246), True, 'import numpy as np\n'), ((7309, 7344), 'numpy.random.randint', 'np.random.randint', (['(0)', 'donors_number'], {}), '(0, donors_number)\n', (7326, 7344), True, 'import numpy as np\n'), ((9012, 9020), 'random.random', 'random', ([], {}), '()\n', (9018, 9020), False, 'from random import choice, random\n'), ((2602, 2615), 'numpy.log10', 'np.log10', (['d3D'], {}), '(d3D)\n', (2610, 2615), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
StarGAN_v2_paddle.core.model
@author: RyanHuang
@github: DrRyanHuang
@updateTime: 2020.8.15
@notice: GPL v3
'''
import math
import copy
import numpy as np
from munch import Munch
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear
#from .wing import FAN
#from core.wing import FAN
class ResBlk(fluid.dygraph.Layer):
    """Pre-activation residual block.

    Optional InstanceNorm before each conv, optional 2x average-pool
    downsampling, and a learned 1x1 shortcut when the channel count
    changes. The output is divided by sqrt(2) to keep unit variance.
    """

    def __init__(self, dim_in, dim_out, actv=None,
                 normalize=False, downsample=False):
        super(ResBlk, self).__init__(self.__class__.__name__)
        # Default activation is LeakyReLU with slope 0.2.
        self.actv = actv if actv is not None else (
            lambda t: fluid.layers.leaky_relu(t, alpha=0.2))
        self.normalize = normalize
        self.downsample = downsample
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        # Two 3x3 convs; channel expansion happens only in conv2.
        self.conv1 = Conv2D(num_channels=dim_in, num_filters=dim_in,
                            filter_size=3, stride=1, padding=1)
        self.conv2 = Conv2D(num_channels=dim_in, num_filters=dim_out,
                            filter_size=3, stride=1, padding=1)
        if self.normalize:
            # This InstanceNorm has no `momentum` argument (upstream issue filed).
            self.norm1 = InstanceNorm(dim_in)
            self.norm2 = InstanceNorm(dim_in)
        if self.learned_sc:
            self.conv1x1 = Conv2D(dim_in, dim_out, 1, 1, 0, bias_attr=False)

    def _shortcut(self, x):
        out = self.conv1x1(x) if self.learned_sc else x
        if self.downsample:
            out = fluid.layers.pool2d(out, pool_size=2, pool_stride=2,
                                      pool_type='avg')
        return out

    def _residual(self, x):
        if self.normalize:
            x = self.norm1(x)
        x = self.conv1(self.actv(x))
        if self.downsample:
            x = fluid.layers.pool2d(x, pool_size=2, pool_stride=2,
                                    pool_type='avg')
        if self.normalize:
            x = self.norm2(x)
        return self.conv2(self.actv(x))

    def forward(self, inputs):
        # Unit-variance residual sum.
        return (self._shortcut(inputs) + self._residual(inputs)) / math.sqrt(2)
class AdaIN(fluid.dygraph.Layer):
    """Adaptive instance normalization.

    Normalizes x with InstanceNorm, then scales/shifts it with per-channel
    gamma and beta predicted from the style code s by a linear layer.
    """

    def __init__(self, style_dim, num_features):
        super(AdaIN, self).__init__(self.__class__.__name__)
        self.norm = InstanceNorm(num_features)
        # Predicts 2 * num_features values: gamma and beta per channel.
        self.fc = Linear(style_dim, num_features * 2)

    def forward(self, x, s):
        params = self.fc(s)
        # Reshape to (N, 2C, 1, 1) so gamma/beta broadcast over H and W.
        params = fluid.layers.reshape(
            params, shape=(params.shape[0], params.shape[1], 1, 1))
        gamma, beta = fluid.layers.split(params, num_or_sections=2, dim=1)
        return (1 + gamma) * self.norm(x) + beta
class AdainResBlk(fluid.dygraph.Layer):
    """Residual block whose normalization layers are AdaIN driven by a
    style code, with optional nearest-neighbour 2x upsampling.

    When w_hpf != 0 the skip connection is dropped (the high-pass path
    replaces it in the generator).
    """

    def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0, actv=None,
                 upsample=False):
        super(AdainResBlk, self).__init__(self.__class__.__name__)
        self.w_hpf = w_hpf
        if actv is None:
            actv = lambda t: fluid.layers.leaky_relu(t, alpha=0.2)
        self.actv = actv
        self.upsample = upsample
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)

    def _build_weights(self, dim_in, dim_out, style_dim=64):
        self.conv1 = Conv2D(dim_in, dim_out, 3, 1, 1)
        self.conv2 = Conv2D(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = Conv2D(dim_in, dim_out, 1, 1, 0)

    def _shortcut(self, x):
        if self.upsample:
            x = fluid.layers.image_resize(x, resample='NEAREST', scale=2)
        return self.conv1x1(x) if self.learned_sc else x

    def _residual(self, x, s):
        x = self.actv(self.norm1(x, s))
        if self.upsample:
            x = fluid.layers.image_resize(x, resample='NEAREST', scale=2)
        x = self.conv1(x)
        x = self.actv(self.norm2(x, s))
        return self.conv2(x)

    def forward(self, x, s):
        out = self._residual(x, s)
        if self.w_hpf == 0:
            # Unit-variance residual sum, as in ResBlk.
            out = (out + self._shortcut(x)) / math.sqrt(2)
        return out
class HighPass(fluid.dygraph.Layer):
    """Fixed depthwise 3x3 Laplacian high-pass filter, scaled by 1/w_hpf."""

    def __init__(self, w_hpf):
        super(HighPass, self).__init__(self.__class__.__name__)
        # Non-learned Laplacian kernel, base shape (1, 1, 3, 3).
        self.filter_ = np.array([[-1, -1, -1],
                                 [-1, 8., -1],
                                 [-1, -1, -1]]).reshape(1, 1, 3, 3) / w_hpf

    def forward(self, x):
        num_channels = x.shape[1]
        # Depthwise conv: one kernel copy per input channel, weight (C, 1, 3, 3).
        filter_ = self.filter_.repeat(num_channels, axis=0)
        param = fluid.initializer.NumpyArrayInitializer(filter_)
        # BUG FIX: was num_filters=1, which mismatches the (C, 1, 3, 3)
        # weight used with groups=C and would collapse the output to a
        # single channel; a depthwise conv needs num_filters == groups == C.
        x = fluid.layers.conv2d(x, num_filters=num_channels, filter_size=3,
                                padding=1, groups=num_channels,
                                param_attr=param, bias_attr=False)
        return x
class LeakyRelu(fluid.dygraph.Layer):
    """Layer wrapper around fluid.layers.leaky_relu with a fixed slope,
    so the activation can be placed inside a Sequential container."""

    def __init__(self, alpha=0.2):
        super(LeakyRelu, self).__init__(self.__class__.__name__)
        self.alpha = alpha

    def forward(self, x):
        return fluid.layers.leaky_relu(x, alpha=self.alpha)
# =============================================================================
# Workaround: `out[idx, y]` fancy indexing is not supported directly here,
# so the (row, column) entries are gathered one by one and concatenated.
# =============================================================================
def get_value_by_index(out, idx, y, who='Discriminator'):
    """Select out[i, y[i]] for each i in idx and concatenate the picks.

    For non-discriminator callers (`who == 'not_discri'`) every picked
    entry is reshaped to (1, entry_len) before concatenation, producing
    a (batch, entry_len) result.
    """
    picked = []
    for row, col in zip(idx, y):
        entry = out[int(row)][int(col)]
        if who == 'not_discri':
            entry = fluid.layers.reshape(entry, (-1, entry.shape[0]))
        picked.append(entry)
    return fluid.layers.concat(picked)
class Generator(fluid.dygraph.Layer):
    """StarGAN v2 generator: encoder-decoder with AdaIN-conditioned
    decoding and optional high-pass skip features guided by face masks."""

    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        super(Generator, self).__init__(self.__class__.__name__)
        dim_in = 2**14 // img_size
        self.img_size = img_size
        self.from_rgb = Conv2D(3, dim_in, 3, 1, 1)
        self.encode = fluid.dygraph.LayerList()
        self.decode = fluid.dygraph.LayerList()
        self.to_rgb = fluid.dygraph.Sequential(
            InstanceNorm(dim_in),
            LeakyRelu(0.2),
            Conv2D(dim_in, 3, 1, 1, 0)
        )
        # down/up-sampling blocks (one extra level when the high-pass
        # path is enabled).
        repeat_num = int(np.log2(img_size)) - 4
        if w_hpf > 0:
            repeat_num += 1
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            self.encode.append(
                ResBlk(dim_in, dim_out, normalize=True, downsample=True)
            )
            self.decode.insert(
                0, AdainResBlk(dim_out, dim_in, style_dim,
                               w_hpf=w_hpf, upsample=True)  # stack-like
            )
            dim_in = dim_out
        # bottleneck blocks
        for _ in range(2):
            self.encode.append(
                ResBlk(dim_out, dim_out, normalize=True)
            )
            self.decode.insert(
                0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf)
            )
        if w_hpf > 0:
            self.hpf = HighPass(w_hpf)

    def forward(self, x, s, masks=None):
        """Translate image x using style code s.

        When masks is given, high-pass-filtered encoder features are
        re-injected at the 32/64/128 resolutions of the decoder.
        """
        x = self.from_rgb(x)
        cache = {}
        for block in self.encode:
            if (masks is not None) and (x.shape[2] in [32, 64, 128]):
                cache[x.shape[2]] = x
            x = block(x)
        for block in self.decode:
            x = block(x, s)
            # BUG FIX: was `s.shape[2]` — the style code s is 2-D, so its
            # third dimension does not exist; the resolution check must use
            # the feature map x, matching the encoder-side condition above.
            if (masks is not None) and (x.shape[2] in [32, 64, 128]):
                mask = masks[0] if x.shape[2] in [32] else masks[1]
                mask = fluid.layers.image_resize(mask, size=x.shape[2],
                                                 resample='BILINEAR')
                x = x + self.hpf(mask * cache[x.shape[2]])
        return self.to_rgb(x)
class MappingNetwork(fluid.dygraph.Layer):
    """Maps a latent code z to a style code, with one output head per domain."""

    def __init__(self, latent_dim=16, style_dim=64, num_domains=2):
        super(MappingNetwork, self).__init__(self.__class__.__name__)
        # Shared trunk: 4 ReLU MLP layers.
        layers = []
        layers += [Linear(latent_dim, 512, act='relu')]
        for _ in range(3):
            layers += [Linear(512, 512, act='relu')]
        self.shared = fluid.dygraph.Sequential(
            *layers
        )
        # One unshared MLP head per domain.
        self.unshared = fluid.dygraph.LayerList()
        for _ in range(num_domains):
            self.unshared.append(
                fluid.dygraph.Sequential(
                    Linear(512, 512, act='relu'),
                    Linear(512, 512, act='relu'),
                    Linear(512, 512, act='relu'),
                    Linear(512, style_dim, act=None)
                )
            )

    def forward(self, z, y):
        """Return the style code of latent z for each sample's target domain y."""
        h = self.shared(z)
        out = []
        for layer in self.unshared:
            out += [layer(h)]
        out = fluid.layers.stack(out, axis=1)  # (batch, num_domains, style_dim)
        # BUG FIX: the np.int alias was removed in NumPy 1.24; use np.int64.
        idx = to_variable(np.arange(y.shape[0], dtype=np.int64))
        s = get_value_by_index(out, idx, y, who='not_discri')  # (batch, style_dim)
        return s
class StyleEncoder(fluid.dygraph.Layer):
    """Extracts a style code from an image, with one linear head per domain."""

    def __init__(self, img_size=256, style_dim=64, num_domains=2, max_conv_dim=512):
        super(StyleEncoder, self).__init__(self.__class__.__name__)
        dim_in = 2**14 // img_size
        # Shared convolutional trunk: stem conv, downsampling ResBlks,
        # then a 4x4 conv collapsing to a 1x1 spatial map.
        blocks = []
        blocks += [Conv2D(3, dim_in, 3, 1, 1)]
        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out
        blocks += [LeakyRelu(0.2)]
        blocks += [Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [LeakyRelu(0.2)]
        self.shared = fluid.dygraph.Sequential(*blocks)
        self.unshared = fluid.dygraph.LayerList()
        for _ in range(num_domains):
            self.unshared.append(Linear(dim_out, style_dim))

    def forward(self, x, y):
        """Return the style code of image x for each sample's domain y."""
        h = self.shared(x)
        h = fluid.layers.reshape(h, (h.shape[0], -1))
        out = []
        for layer in self.unshared:
            out += [layer(h)]
        out = fluid.layers.stack(out, axis=1)  # (batch, num_domains, style_dim)
        # BUG FIX: the np.int alias was removed in NumPy 1.24; use np.int64.
        idx = to_variable(np.arange(y.shape[0], dtype=np.int64))
        s = get_value_by_index(out, idx, y, who='not_discri')  # (batch, style_dim)
        return s
class Discriminator(fluid.dygraph.Layer):
    """Multi-domain discriminator: one real/fake logit per domain; the
    logit for each sample's own domain y is selected in forward."""

    def __init__(self, img_size=256, num_domains=2, max_conv_dim=512):
        super(Discriminator, self).__init__(self.__class__.__name__)
        dim_in = 2**14 // img_size
        blocks = []
        blocks += [Conv2D(3, dim_in, 3, 1, 1)]
        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out
        blocks += [LeakyRelu(0.2)]
        blocks += [Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [LeakyRelu(0.2)]
        # Final 1x1 conv produces one logit per domain.
        blocks += [Conv2D(dim_out, num_domains, 1, 1, 0)]
        self.main = fluid.dygraph.Sequential(*blocks)

    def forward(self, x, y):
        """Return the logit of x for each sample's domain y, shape (batch,)."""
        out = self.main(x)
        out = fluid.layers.reshape(out, (x.shape[0], -1))
        # BUG FIX: the np.int alias was removed in NumPy 1.24; use np.int64.
        idx = to_variable(np.arange(y.shape[0], dtype=np.int64))
        out = get_value_by_index(out, idx, y)
        return out
def build_model(args):
    """Build the StarGAN v2 networks and their EMA copies.

    Returns (nets, nets_ema) Munch bundles. The EMA copies are built
    fresh and initialized from the live networks' state dicts because
    deepcopy fails on these layer objects (TypeError: can't pickle).
    """
    generator = Generator(args.img_size, args.style_dim, w_hpf=args.w_hpf)
    mapping_network = MappingNetwork(args.latent_dim, args.style_dim, args.num_domains)
    style_encoder = StyleEncoder(args.img_size, args.style_dim, args.num_domains)
    discriminator = Discriminator(args.img_size, args.num_domains)
    generator_ema = Generator(args.img_size, args.style_dim, w_hpf=args.w_hpf)
    generator_ema.set_dict(generator.state_dict().copy())
    mapping_network_ema = MappingNetwork(args.latent_dim, args.style_dim, args.num_domains)
    mapping_network_ema.set_dict(mapping_network.state_dict().copy())
    style_encoder_ema = StyleEncoder(args.img_size, args.style_dim, args.num_domains)
    style_encoder_ema.set_dict(style_encoder.state_dict().copy())
    nets = Munch(generator=generator,
                 mapping_network=mapping_network,
                 style_encoder=style_encoder,
                 discriminator=discriminator)
    nets_ema = Munch(generator=generator_ema,
                     mapping_network=mapping_network_ema,
                     style_encoder=style_encoder_ema)
    if args.w_hpf > 0:
        # NOTE(review): FAN's import is commented out at the top of this
        # file, so this branch raises NameError when w_hpf > 0 — restore
        # `from .wing import FAN` (or core.wing) before enabling it.
        fan = FAN(fname_pretrained=args.wing_path).eval()
        nets.fan = fan
        nets_ema.fan = fan
    return nets, nets_ema
"paddle.fluid.layers.split",
"paddle.fluid.dygraph.LayerList",
"numpy.arange",
"paddle.fluid.layers.pool2d",
"paddle.fluid.layers.concat",
"paddle.fluid.layers.image_resize",
"paddle.fluid.dygraph.Linear",
"paddle.fluid.initializer.NumpyArrayInitializer",
"paddle.fluid.layers.reshape",
"math.sqrt"... | [((5502, 5527), 'paddle.fluid.layers.concat', 'fluid.layers.concat', (['temp'], {}), '(temp)\n', (5521, 5527), True, 'import paddle.fluid as fluid\n'), ((12356, 12478), 'munch.Munch', 'Munch', ([], {'generator': 'generator', 'mapping_network': 'mapping_network', 'style_encoder': 'style_encoder', 'discriminator': 'discriminator'}), '(generator=generator, mapping_network=mapping_network, style_encoder=\n style_encoder, discriminator=discriminator)\n', (12361, 12478), False, 'from munch import Munch\n'), ((12540, 12644), 'munch.Munch', 'Munch', ([], {'generator': 'generator_ema', 'mapping_network': 'mapping_network_ema', 'style_encoder': 'style_encoder_ema'}), '(generator=generator_ema, mapping_network=mapping_network_ema,\n style_encoder=style_encoder_ema)\n', (12545, 12644), False, 'from munch import Munch\n'), ((940, 1027), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', ([], {'num_channels': 'dim_in', 'num_filters': 'dim_in', 'filter_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(num_channels=dim_in, num_filters=dim_in, filter_size=3, stride=1,\n padding=1)\n', (946, 1027), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((1046, 1134), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', ([], {'num_channels': 'dim_in', 'num_filters': 'dim_out', 'filter_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(num_channels=dim_in, num_filters=dim_out, filter_size=3, stride=1,\n padding=1)\n', (1052, 1134), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((2313, 2339), 'paddle.fluid.dygraph.InstanceNorm', 'InstanceNorm', (['num_features'], {}), '(num_features)\n', (2325, 2339), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((2358, 2393), 'paddle.fluid.dygraph.Linear', 'Linear', (['style_dim', '(num_features * 2)'], {}), '(style_dim, num_features * 2)\n', (2364, 2393), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, 
Linear\n'), ((2457, 2518), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['h'], {'shape': '(h.shape[0], h.shape[1], 1, 1)'}), '(h, shape=(h.shape[0], h.shape[1], 1, 1))\n', (2477, 2518), True, 'import paddle.fluid as fluid\n'), ((2541, 2588), 'paddle.fluid.layers.split', 'fluid.layers.split', (['h'], {'num_or_sections': '(2)', 'dim': '(1)'}), '(h, num_or_sections=2, dim=1)\n', (2559, 2588), True, 'import paddle.fluid as fluid\n'), ((3202, 3234), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_in', 'dim_out', '(3)', '(1)', '(1)'], {}), '(dim_in, dim_out, 3, 1, 1)\n', (3208, 3234), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((3256, 3289), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_out', 'dim_out', '(3)', '(1)', '(1)'], {}), '(dim_out, dim_out, 3, 1, 1)\n', (3262, 3289), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((4585, 4633), 'paddle.fluid.initializer.NumpyArrayInitializer', 'fluid.initializer.NumpyArrayInitializer', (['filter_'], {}), '(filter_)\n', (4624, 4633), True, 'import paddle.fluid as fluid\n'), ((4646, 4768), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['x'], {'num_filters': '(1)', 'filter_size': '(3)', 'padding': '(1)', 'groups': 'x.shape[1]', 'param_attr': 'param', 'bias_attr': '(False)'}), '(x, num_filters=1, filter_size=3, padding=1, groups=x.\n shape[1], param_attr=param, bias_attr=False)\n', (4665, 4768), True, 'import paddle.fluid as fluid\n'), ((4990, 5028), 'paddle.fluid.layers.leaky_relu', 'fluid.layers.leaky_relu', (['x', 'self.alpha'], {}), '(x, self.alpha)\n', (5013, 5028), True, 'import paddle.fluid as fluid\n'), ((5833, 5859), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['(3)', 'dim_in', '(3)', '(1)', '(1)'], {}), '(3, dim_in, 3, 1, 1)\n', (5839, 5859), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((5883, 5908), 'paddle.fluid.dygraph.LayerList', 'fluid.dygraph.LayerList', ([], {}), 
'()\n', (5906, 5908), True, 'import paddle.fluid as fluid\n'), ((5931, 5956), 'paddle.fluid.dygraph.LayerList', 'fluid.dygraph.LayerList', ([], {}), '()\n', (5954, 5956), True, 'import paddle.fluid as fluid\n'), ((8038, 8071), 'paddle.fluid.dygraph.Sequential', 'fluid.dygraph.Sequential', (['*layers'], {}), '(*layers)\n', (8062, 8071), True, 'import paddle.fluid as fluid\n'), ((8119, 8144), 'paddle.fluid.dygraph.LayerList', 'fluid.dygraph.LayerList', ([], {}), '()\n', (8142, 8144), True, 'import paddle.fluid as fluid\n'), ((8646, 8677), 'paddle.fluid.layers.stack', 'fluid.layers.stack', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (8664, 8677), True, 'import paddle.fluid as fluid\n'), ((9574, 9607), 'paddle.fluid.dygraph.Sequential', 'fluid.dygraph.Sequential', (['*blocks'], {}), '(*blocks)\n', (9598, 9607), True, 'import paddle.fluid as fluid\n'), ((9641, 9666), 'paddle.fluid.dygraph.LayerList', 'fluid.dygraph.LayerList', ([], {}), '()\n', (9664, 9666), True, 'import paddle.fluid as fluid\n'), ((9847, 9888), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['h', '(h.shape[0], -1)'], {}), '(h, (h.shape[0], -1))\n', (9867, 9888), True, 'import paddle.fluid as fluid\n'), ((9986, 10017), 'paddle.fluid.layers.stack', 'fluid.layers.stack', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (10004, 10017), True, 'import paddle.fluid as fluid\n'), ((10974, 11007), 'paddle.fluid.dygraph.Sequential', 'fluid.dygraph.Sequential', (['*blocks'], {}), '(*blocks)\n', (10998, 11007), True, 'import paddle.fluid as fluid\n'), ((11083, 11126), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['out', '(x.shape[0], -1)'], {}), '(out, (x.shape[0], -1))\n', (11103, 11126), True, 'import paddle.fluid as fluid\n'), ((1183, 1203), 'paddle.fluid.dygraph.InstanceNorm', 'InstanceNorm', (['dim_in'], {}), '(dim_in)\n', (1195, 1203), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((1265, 1285), 'paddle.fluid.dygraph.InstanceNorm', 
'InstanceNorm', (['dim_in'], {}), '(dim_in)\n', (1277, 1285), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((1341, 1390), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_in', 'dim_out', '(1)', '(1)', '(0)'], {'bias_attr': '(False)'}), '(dim_in, dim_out, 1, 1, 0, bias_attr=False)\n', (1347, 1390), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((1536, 1603), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['x'], {'pool_size': '(2)', 'pool_stride': '(2)', 'pool_type': '"""avg"""'}), "(x, pool_size=2, pool_stride=2, pool_type='avg')\n", (1555, 1603), True, 'import paddle.fluid as fluid\n'), ((1806, 1873), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['x'], {'pool_size': '(2)', 'pool_stride': '(2)', 'pool_type': '"""avg"""'}), "(x, pool_size=2, pool_stride=2, pool_type='avg')\n", (1825, 1873), True, 'import paddle.fluid as fluid\n'), ((2114, 2126), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (2123, 2126), False, 'import math\n'), ((3438, 3470), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_in', 'dim_out', '(1)', '(1)', '(0)'], {}), '(dim_in, dim_out, 1, 1, 0)\n', (3444, 3470), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((3546, 3603), 'paddle.fluid.layers.image_resize', 'fluid.layers.image_resize', (['x'], {'resample': '"""NEAREST"""', 'scale': '(2)'}), "(x, resample='NEAREST', scale=2)\n", (3571, 3603), True, 'import paddle.fluid as fluid\n'), ((3809, 3866), 'paddle.fluid.layers.image_resize', 'fluid.layers.image_resize', (['x'], {'resample': '"""NEAREST"""', 'scale': '(2)'}), "(x, resample='NEAREST', scale=2)\n", (3834, 3866), True, 'import paddle.fluid as fluid\n'), ((5417, 5464), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['item', '(-1, item.shape[0])'], {}), '(item, (-1, item.shape[0]))\n', (5437, 5464), True, 'import paddle.fluid as fluid\n'), ((6017, 6037), 'paddle.fluid.dygraph.InstanceNorm', 
'InstanceNorm', (['dim_in'], {}), '(dim_in)\n', (6029, 6037), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((6079, 6105), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_in', '(3)', '(1)', '(1)', '(0)'], {}), '(dim_in, 3, 1, 1, 0)\n', (6085, 6105), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((7890, 7925), 'paddle.fluid.dygraph.Linear', 'Linear', (['latent_dim', '(512)'], {'act': '"""relu"""'}), "(latent_dim, 512, act='relu')\n", (7896, 7925), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((8761, 8796), 'numpy.arange', 'np.arange', (['y.shape[0]'], {'dtype': 'np.int'}), '(y.shape[0], dtype=np.int)\n', (8770, 8796), True, 'import numpy as np\n'), ((9170, 9196), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['(3)', 'dim_in', '(3)', '(1)', '(1)'], {}), '(3, dim_in, 3, 1, 1)\n', (9176, 9196), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((9482, 9515), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_out', 'dim_out', '(4)', '(1)', '(0)'], {}), '(dim_out, dim_out, 4, 1, 0)\n', (9488, 9515), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((10100, 10135), 'numpy.arange', 'np.arange', (['y.shape[0]'], {'dtype': 'np.int'}), '(y.shape[0], dtype=np.int)\n', (10109, 10135), True, 'import numpy as np\n'), ((10506, 10532), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['(3)', 'dim_in', '(3)', '(1)', '(1)'], {}), '(3, dim_in, 3, 1, 1)\n', (10512, 10532), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((10826, 10859), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_out', 'dim_out', '(4)', '(1)', '(0)'], {}), '(dim_out, dim_out, 4, 1, 0)\n', (10832, 10859), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((10915, 10952), 'paddle.fluid.dygraph.Conv2D', 'Conv2D', (['dim_out', 'num_domains', '(1)', '(1)', 
'(0)'], {}), '(dim_out, num_domains, 1, 1, 0)\n', (10921, 10952), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((11153, 11188), 'numpy.arange', 'np.arange', (['y.shape[0]'], {'dtype': 'np.int'}), '(y.shape[0], dtype=np.int)\n', (11162, 11188), True, 'import numpy as np\n'), ((639, 676), 'paddle.fluid.layers.leaky_relu', 'fluid.layers.leaky_relu', (['x'], {'alpha': '(0.2)'}), '(x, alpha=0.2)\n', (662, 676), True, 'import paddle.fluid as fluid\n'), ((2918, 2955), 'paddle.fluid.layers.leaky_relu', 'fluid.layers.leaky_relu', (['x'], {'alpha': '(0.2)'}), '(x, alpha=0.2)\n', (2941, 2955), True, 'import paddle.fluid as fluid\n'), ((4129, 4141), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4138, 4141), False, 'import math\n'), ((6176, 6193), 'numpy.log2', 'np.log2', (['img_size'], {}), '(img_size)\n', (6183, 6193), True, 'import numpy as np\n'), ((7510, 7579), 'paddle.fluid.layers.image_resize', 'fluid.layers.image_resize', (['mask'], {'size': 'x.shape[2]', 'resample': '"""BILINEAR"""'}), "(mask, size=x.shape[2], resample='BILINEAR')\n", (7535, 7579), True, 'import paddle.fluid as fluid\n'), ((7986, 8014), 'paddle.fluid.dygraph.Linear', 'Linear', (['(512)', '(512)'], {'act': '"""relu"""'}), "(512, 512, act='relu')\n", (7992, 8014), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((9224, 9241), 'numpy.log2', 'np.log2', (['img_size'], {}), '(img_size)\n', (9231, 9241), True, 'import numpy as np\n'), ((9737, 9763), 'paddle.fluid.dygraph.Linear', 'Linear', (['dim_out', 'style_dim'], {}), '(dim_out, style_dim)\n', (9743, 9763), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((10560, 10577), 'numpy.log2', 'np.log2', (['img_size'], {}), '(img_size)\n', (10567, 10577), True, 'import numpy as np\n'), ((4332, 4385), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 8.0, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 8.0, -1], [-1, -1, -1]])\n', (4340, 
4385), True, 'import numpy as np\n'), ((8278, 8306), 'paddle.fluid.dygraph.Linear', 'Linear', (['(512)', '(512)'], {'act': '"""relu"""'}), "(512, 512, act='relu')\n", (8284, 8306), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((8328, 8356), 'paddle.fluid.dygraph.Linear', 'Linear', (['(512)', '(512)'], {'act': '"""relu"""'}), "(512, 512, act='relu')\n", (8334, 8356), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((8378, 8406), 'paddle.fluid.dygraph.Linear', 'Linear', (['(512)', '(512)'], {'act': '"""relu"""'}), "(512, 512, act='relu')\n", (8384, 8406), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n'), ((8428, 8460), 'paddle.fluid.dygraph.Linear', 'Linear', (['(512)', 'style_dim'], {'act': 'None'}), '(512, style_dim, act=None)\n', (8434, 8460), False, 'from paddle.fluid.dygraph import to_variable, Conv2D, InstanceNorm, Linear\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Union
import numpy as np
from scipy import stats
from ..common.typetools import ArrayLike
from . import sequences
from . import base
from .base import IntOrParameter
from . import utils
# In some cases we will need the average of the k best.
def avg_of_k_best(archive: utils.Archive[utils.Value]) -> ArrayLike:
    """Return the component-wise average of the k best points in the archive.

    k = min(len(archive) // 4, dimension), clipped to at least 1 (fteytaud
    heuristic). "Best" is ranked by the pessimistic estimation stored in
    the archive.
    """
    items = list(archive.items_as_arrays())
    dimension = len(items[0][0])
    # fteytaud heuristic, clipped so we always average at least one point.
    k = max(1, min(len(archive) // 4, dimension))
    # sorted(...)[:k] is already a list; no identity comprehension needed
    # (the original also shadowed k inside the comprehension).
    first_k_individuals = sorted(
        items, key=lambda indiv: archive[indiv[0]].get_estimation("pessimistic"))[:k]
    assert len(first_k_individuals) == k
    return np.array(sum(p[0] for p in first_k_individuals) / k)
# # # # # classes of optimizers # # # # #
class OneShotOptimizer(base.Optimizer):
    """Base class for one-shot optimizers.

    One-shot means all fitness evaluations can be issued in parallel:
    the sequence of asked points does not depend on previous tells.
    """
    # pylint: disable=abstract-method
    # Flag read by benchmarking/registration code to identify one-shot methods.
    one_shot = True
# Recentering or center-based counterparts of the original Nevergrad oneshot optimizers:
# - Quasi-opposite counterpart of a sampling = one sample out of 2 is the symmetric of the previous one,
# multiplied by rand([0,1]).
# - Opposite counterpart of a sampling = one sample out of 2 is the symmetric of the previous one.
# - PlusMiddlePoint counterpart of a sampling: we add (0,0,...,0) as a first point.
# Useful in high dim.
# - Some variants use a rescaling depending on the budget and the dimension.
# # # # # One-shot optimizers: all fitness evaluations are in parallel. # # # # #
class _RandomSearch(OneShotOptimizer):
    """Random sampling optimizer, configured through a RandomSearchMaker.

    Supports a forced zero first point (middle_point), opposition-based
    sampling (every other ask returns the mirrored previous point),
    Cauchy vs Gaussian sampling, and several scaling rules.
    """

    def __init__(self, parametrization: IntOrParameter, budget: Optional[int] = None, num_workers: int = 1) -> None:
        super().__init__(parametrization, budget=budget, num_workers=num_workers)
        self._parameters = RandomSearchMaker()  # updated by the parametrized family
        # Holds the previously asked point so the next ask can return its
        # (possibly scaled) opposite; None when no mirror is pending.
        self._opposable_data: Optional[np.ndarray] = None

    def _internal_ask(self) -> ArrayLike:
        # pylint: disable=not-callable
        mode = self._parameters.opposition_mode
        if self._opposable_data is not None and mode is not None:
            # Opposition step: mirror the previous point. "quasi" multiplies
            # the mirror by a uniform random factor in [0, 1].
            data = self._opposable_data
            data *= -(self._rng.uniform(0.0, 1.0) if mode == "quasi" else 1.0)
            self._opposable_data = None
            return data
        if self._parameters.middle_point and not self._num_ask:
            # First ask: center of the space.
            self._opposable_data = np.zeros(self.dimension)
            return self._opposable_data  # type: ignore
        scale = self._parameters.scale
        if isinstance(scale, str) and scale == "auto":
            # Some variants use a rescaling depending on the budget and the dimension.
            scale = (1 + np.log(self.budget)) / (4 * np.log(self.dimension))
        if isinstance(scale, str) and scale == "random":
            # Randomized (log-normal) scale, shrunk with dimension.
            scale = np.exp(self._rng.normal(0., 1.) - 2.) / np.sqrt(self.dimension)
        point = (self._rng.standard_cauchy(self.dimension) if self._parameters.cauchy
                 else self._rng.normal(0, 1, self.dimension))
        self._opposable_data = scale * point
        return self._opposable_data  # type: ignore

    def _internal_provide_recommendation(self) -> ArrayLike:
        # "stupid" mode recommends a fresh random point (baseline behavior).
        if self._parameters.stupid:
            return self._internal_ask()
        if self._parameters.recommendation_rule == "average_of_best":
            return avg_of_k_best(self.archive)
        return super()._internal_provide_recommendation()
class RandomSearchMaker(base.ParametrizedFamily):
    """Provides random suggestions.

    Parameters
    ----------
    stupid: bool
        Provides a random recommendation instead of the best point so far (for baseline)
    middle_point: bool
        enforces that the first suggested point (ask) is zero.
    opposition_mode: str or None
        symmetrizes exploration wrt the center: (e.g. https://ieeexplore.ieee.org/document/4424748)
        - full symmetry if "opposite"
        - random * symmetric if "quasi"
    cauchy: bool
        use a Cauchy distribution instead of Gaussian distribution
    scale: float or "random"
        scalar for multiplying the suggested point values, or string:
        - "random": uses a randomized pattern for the scale.
        - "auto": scales in function of dimension and budget (see XXX)
    recommendation_rule: str
        "average_of_best" or "pessimistic"; "pessimistic" is the default and implies selecting the pessimistic best.
    """

    _optimizer_class = _RandomSearch
    one_shot = True

    # pylint: disable=unused-argument
    def __init__(self, *, middle_point: bool = False, stupid: bool = False,
                 opposition_mode: Optional[str] = None,
                 cauchy: bool = False, scale: Union[float, str] = 1.,
                 recommendation_rule: str = "pessimistic") -> None:
        # Validate before storing; all attributes are read by _RandomSearch.
        assert opposition_mode in (None, "quasi", "opposite")
        assert isinstance(scale, (int, float)) or scale in ("auto", "random")
        self.cauchy = cauchy
        self.scale = scale
        self.middle_point = middle_point
        self.stupid = stupid
        self.opposition_mode = opposition_mode
        self.recommendation_rule = recommendation_rule
        # Superclass init comes last so it can inspect the stored attributes.
        super().__init__()
# Registered one-shot random-search variants (named ParametrizedFamily instances).
Zero = RandomSearchMaker(scale=0.).with_name("Zero", register=True)
RandomSearch = RandomSearchMaker().with_name("RandomSearch", register=True)
QORandomSearch = RandomSearchMaker(opposition_mode="quasi").with_name("QORandomSearch", register=True)
ORandomSearch = RandomSearchMaker(opposition_mode="opposite").with_name("ORandomSearch", register=True)
RandomSearchPlusMiddlePoint = RandomSearchMaker(middle_point=True).with_name("RandomSearchPlusMiddlePoint", register=True)
LargerScaleRandomSearchPlusMiddlePoint = RandomSearchMaker(
middle_point=True, scale=500.).with_name("LargerScaleRandomSearchPlusMiddlePoint", register=True)
SmallScaleRandomSearchPlusMiddlePoint = RandomSearchMaker(
middle_point=True, scale=.01).with_name("SmallScaleRandomSearchPlusMiddlePoint", register=True)
StupidRandom = RandomSearchMaker(stupid=True).with_name("StupidRandom", register=True)
CauchyRandomSearch = RandomSearchMaker(cauchy=True).with_name("CauchyRandomSearch", register=True)
RandomScaleRandomSearch = RandomSearchMaker(
scale="random", middle_point=True).with_name("RandomScaleRandomSearch", register=True)
RandomScaleRandomSearchPlusMiddlePoint = RandomSearchMaker(
scale="random", middle_point=True).with_name("RandomScaleRandomSearchPlusMiddlePoint", register=True)
class _SamplingSearch(OneShotOptimizer):
    """One-shot optimizer drawing points from a low-discrepancy sampler."""

    def __init__(self, parametrization: IntOrParameter, budget: Optional[int] = None, num_workers: int = 1) -> None:
        super().__init__(parametrization, budget=budget, num_workers=num_workers)
        self._parameters = SamplingSearch()  # updated by the parametrized family
        self._sampler_instance: Optional[sequences.Sampler] = None
        self._rescaler: Optional[sequences.Rescaler] = None
        # Previously asked sample, pending its (quasi-)opposite.
        self._opposable_data: Optional[np.ndarray] = None

    @property
    def sampler(self) -> sequences.Sampler:
        """Lazily build (and optionally rescale) the configured sampler."""
        if self._sampler_instance is None:
            budget = None if self.budget is None else self.budget - self._parameters.middle_point
            samplers = {"Halton": sequences.HaltonSampler,
                        "Hammersley": sequences.HammersleySampler,
                        "LHS": sequences.LHSSampler,
                        }
            # BUG FIX: the original compared the family object itself to the mode
            # strings ("self._parameters == 'quasi'"), which is always False, so the
            # internal budget was never halved. Opposition modes replay each sample
            # once (mirrored), hence only half the budget of fresh samples is needed.
            mode = self._parameters.opposition_mode
            internal_budget = (budget + 1) // 2 if budget and mode in ("quasi", "opposite") else budget
            self._sampler_instance = samplers[self._parameters.sampler](
                self.dimension, internal_budget, scrambling=self._parameters.scrambled, random_state=self._rng)
            assert self._sampler_instance is not None
            if self._parameters.rescaled:
                self._rescaler = sequences.Rescaler(self.sampler)
                self._sampler_instance.reinitialize()  # sampler was consumed by the scaler
        return self._sampler_instance

    def _internal_ask(self) -> ArrayLike:
        # pylint: disable=not-callable
        # With middle_point set, the very first ask returns the domain center.
        if self._parameters.middle_point and not self._num_ask:
            return np.zeros(self.dimension)  # type: ignore
        mode = self._parameters.opposition_mode
        if self._opposable_data is not None and mode is not None:
            # weird mypy error, revealed as array, but not accepting substraction
            data = self._opposable_data
            data *= -(self._rng.uniform(0.0, 1.0) if mode == "quasi" else 1.0)
            self._opposable_data = None
            return data
        sample = self.sampler()
        if self._rescaler is not None:
            sample = self._rescaler.apply(sample)
        if self._parameters.autorescale:
            # Budget/dimension dependent rescaling, as in the "auto" scale mode.
            self._parameters.scale = (1 + np.log(self.budget)) / (4 * np.log(self.dimension))
        # Map the [0, 1] sample to real space through an inverse CDF.
        self._opposable_data = self._parameters.scale * (
            stats.cauchy.ppf if self._parameters.cauchy else stats.norm.ppf)(sample)
        assert self._opposable_data is not None
        return self._opposable_data

    def _internal_provide_recommendation(self) -> ArrayLike:
        if self._parameters.recommendation_rule == "average_of_best":
            return avg_of_k_best(self.archive)
        return super()._internal_provide_recommendation()
# pylint: disable=too-many-instance-attributes
class SamplingSearch(base.ParametrizedFamily):
"""This is a one-shot optimization method, hopefully better than random search
by ensuring more uniformity.
Parameters
----------
sampler: str
Choice of the sampler among "Halton", "Hammersley" and "LHS".
scrambled: bool
Adds scrambling to the search; much better in high dimension and rarely worse
than the original search.
middle_point: bool
enforces that the first suggested point (ask) is zero.
opposition_mode: str or None
symmetrizes exploration wrt the center: "opposite" for full symmetry,
"quasi" for randomly scaled symmetry.
cauchy: bool
use Cauchy inverse distribution instead of Gaussian when fitting points to real space
(instead of box).
autorescale: bool
rescale the sampling pattern in function of the dimension and budget.
scale: float or "random"
scalar for multiplying the suggested point values.
rescaled: bool
rescales the sampling pattern to reach the boundaries.
recommendation_rule: str
"average_of_best" or "pessimistic"; "pessimistic" is the default and implies selecting the pessimistic best.
Notes
-----
- Halton is a low quality sampling method when the dimension is high; it is usually better
to use Halton with scrambling.
- When the budget is known in advance, it is also better to replace Halton by Hammersley.
Basically the key difference with Halton is adding one coordinate evenly spaced
(the discrepancy is better).
budget, low discrepancy sequences (e.g. scrambled Hammersley) have a better discrepancy.
- Reference: Halton 1964: Algorithm 247: Radical-inverse quasi-random point sequence, ACM, p. 701.
adds scrambling to the Halton search; much better in high dimension and rarely worse
than the original Halton search.
- About Latin Hypercube Sampling (LHS):
Though partially incremental versions exist, this implementation needs the budget in advance.
This can be great in terms of discrepancy when the budget is not very high.
"""
one_shot = True
# Optimizer implementation instantiated by this family.
_optimizer_class = _SamplingSearch
# pylint: disable=unused-argument
def __init__(self, *, sampler: str = "Halton", scrambled: bool = False, middle_point: bool = False,
opposition_mode: Optional[str] = None,
cauchy: bool = False, autorescale: bool = False, scale: float = 1., rescaled: bool = False,
recommendation_rule: str = "pessimistic") -> None:
# keep all parameters and set initialize superclass for print
self.sampler = sampler
self.opposition_mode = opposition_mode
self.middle_point = middle_point
self.scrambled = scrambled
self.cauchy = cauchy
self.autorescale = autorescale
self.scale = scale
self.rescaled = rescaled
self.recommendation_rule = recommendation_rule
super().__init__()
# pylint: disable=line-too-long
# Registered one-shot sampling-search variants (named ParametrizedFamily instances).
HaltonSearch = SamplingSearch().with_name("HaltonSearch", register=True)
HaltonSearchPlusMiddlePoint = SamplingSearch(middle_point=True).with_name("HaltonSearchPlusMiddlePoint", register=True)
LargeHaltonSearch = SamplingSearch(scale=100.).with_name("LargeHaltonSearch", register=True)
LargeScrHaltonSearch = SamplingSearch(scale=100., scrambled=True).with_name("LargeScrHaltonSearch", register=True)
LargeHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=100., middle_point=True).with_name("LargeHaltonSearchPlusMiddlePoint", register=True)
SmallHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=.01, middle_point=True).with_name("SmallHaltonSearchPlusMiddlePoint", register=True)
ScrHaltonSearch = SamplingSearch(scrambled=True).with_name("ScrHaltonSearch", register=True)
ScrHaltonSearchPlusMiddlePoint = SamplingSearch(
middle_point=True, scrambled=True).with_name("ScrHaltonSearchPlusMiddlePoint", register=True)
LargeScrHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=100., middle_point=True, scrambled=True).with_name("LargeScrHaltonSearchPlusMiddlePoint", register=True)
SmallScrHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=.01, middle_point=True, scrambled=True).with_name("SmallScrHaltonSearchPlusMiddlePoint", register=True)
HammersleySearch = SamplingSearch(sampler="Hammersley").with_name("HammersleySearch", register=True)
HammersleySearchPlusMiddlePoint = SamplingSearch(
sampler="Hammersley", middle_point=True).with_name("HammersleySearchPlusMiddlePoint", register=True)
LargeHammersleySearchPlusMiddlePoint = SamplingSearch(
scale=100., sampler="Hammersley", middle_point=True).with_name("LargeHammersleySearchPlusMiddlePoint", register=True)
SmallHammersleySearchPlusMiddlePoint = SamplingSearch(
scale=.01, sampler="Hammersley", middle_point=True).with_name("SmallHammersleySearchPlusMiddlePoint", register=True)
LargeScrHammersleySearchPlusMiddlePoint = SamplingSearch(
scrambled=True, scale=100., sampler="Hammersley", middle_point=True).with_name("LargeScrHammersleySearchPlusMiddlePoint", register=True)
SmallScrHammersleySearchPlusMiddlePoint = SamplingSearch(
scrambled=True, scale=.01, sampler="Hammersley", middle_point=True).with_name("SmallScrHammersleySearchPlusMiddlePoint", register=True)
ScrHammersleySearchPlusMiddlePoint = SamplingSearch(
scrambled=True, sampler="Hammersley", middle_point=True).with_name("ScrHammersleySearchPlusMiddlePoint", register=True)
LargeHammersleySearch = SamplingSearch(scale=100., sampler="Hammersley").with_name("LargeHammersleySearch", register=True)
LargeScrHammersleySearch = SamplingSearch(
scale=100., sampler="Hammersley", scrambled=True).with_name("LargeScrHammersleySearch", register=True)
ScrHammersleySearch = SamplingSearch(sampler="Hammersley", scrambled=True).with_name("ScrHammersleySearch", register=True)
QOScrHammersleySearch = SamplingSearch(sampler="Hammersley", scrambled=True,
opposition_mode="quasi").with_name("QOScrHammersleySearch", register=True)
OScrHammersleySearch = SamplingSearch(sampler="Hammersley", scrambled=True,
opposition_mode="opposite").with_name("OScrHammersleySearch", register=True)
RescaleScrHammersleySearch = SamplingSearch(
sampler="Hammersley", scrambled=True, rescaled=True).with_name("RescaleScrHammersleySearch", register=True)
CauchyScrHammersleySearch = SamplingSearch(
cauchy=True, sampler="Hammersley", scrambled=True).with_name("CauchyScrHammersleySearch", register=True)
LHSSearch = SamplingSearch(sampler="LHS").with_name("LHSSearch", register=True)
CauchyLHSSearch = SamplingSearch(sampler="LHS", cauchy=True).with_name("CauchyLHSSearch", register=True)
# "Avg" variants recommend the average of the k best points instead of the single best.
AvgHaltonSearch = SamplingSearch(recommendation_rule="average_of_best").with_name("AvgHaltonSearch", register=True)
AvgHaltonSearchPlusMiddlePoint = SamplingSearch(middle_point=True, recommendation_rule="average_of_best").with_name(
"AvgHaltonSearchPlusMiddlePoint", register=True)
AvgLargeHaltonSearch = SamplingSearch(scale=100., recommendation_rule="average_of_best").with_name("AvgLargeHaltonSearch", register=True)
AvgLargeScrHaltonSearch = SamplingSearch(scale=100., scrambled=True, recommendation_rule="average_of_best").with_name(
"AvgLargeScrHaltonSearch", register=True)
AvgLargeHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=100., middle_point=True, recommendation_rule="average_of_best").with_name("AvgLargeHaltonSearchPlusMiddlePoint", register=True)
AvgSmallHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=.01, middle_point=True, recommendation_rule="average_of_best").with_name("AvgSmallHaltonSearchPlusMiddlePoint", register=True)
AvgScrHaltonSearch = SamplingSearch(scrambled=True, recommendation_rule="average_of_best").with_name("AvgScrHaltonSearch", register=True)
AvgScrHaltonSearchPlusMiddlePoint = SamplingSearch(
middle_point=True, scrambled=True, recommendation_rule="average_of_best").with_name("AvgScrHaltonSearchPlusMiddlePoint", register=True)
AvgLargeScrHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=100., middle_point=True, scrambled=True, recommendation_rule="average_of_best").with_name("AvgLargeScrHaltonSearchPlusMiddlePoint", register=True)
AvgSmallScrHaltonSearchPlusMiddlePoint = SamplingSearch(
scale=.01, middle_point=True, scrambled=True, recommendation_rule="average_of_best").with_name("AvgSmallScrHaltonSearchPlusMiddlePoint", register=True)
AvgHammersleySearch = SamplingSearch(sampler="Hammersley", recommendation_rule="average_of_best").with_name(
"AvgHammersleySearch", register=True)
AvgHammersleySearchPlusMiddlePoint = SamplingSearch(
sampler="Hammersley", middle_point=True, recommendation_rule="average_of_best").with_name("AvgHammersleySearchPlusMiddlePoint", register=True)
AvgLargeHammersleySearchPlusMiddlePoint = SamplingSearch(
scale=100., sampler="Hammersley", middle_point=True, recommendation_rule="average_of_best").with_name("AvgLargeHammersleySearchPlusMiddlePoint", register=True)
AvgSmallHammersleySearchPlusMiddlePoint = SamplingSearch(
scale=.01, sampler="Hammersley", middle_point=True, recommendation_rule="average_of_best").with_name("AvgSmallHammersleySearchPlusMiddlePoint", register=True)
AvgLargeScrHammersleySearchPlusMiddlePoint = SamplingSearch(
scrambled=True, scale=100., sampler="Hammersley", middle_point=True, recommendation_rule="average_of_best").with_name("AvgLargeScrHammersleySearchPlusMiddlePoint", register=True)
AvgSmallScrHammersleySearchPlusMiddlePoint = SamplingSearch(
scrambled=True, scale=.01, sampler="Hammersley", middle_point=True, recommendation_rule="average_of_best").with_name("AvgSmallScrHammersleySearchPlusMiddlePoint", register=True)
AvgScrHammersleySearchPlusMiddlePoint = SamplingSearch(
scrambled=True, sampler="Hammersley", middle_point=True, recommendation_rule="average_of_best").with_name("AvgScrHammersleySearchPlusMiddlePoint", register=True)
AvgLargeHammersleySearch = SamplingSearch(scale=100., sampler="Hammersley",
recommendation_rule="average_of_best").with_name("AvgLargeHammersleySearch", register=True)
AvgLargeScrHammersleySearch = SamplingSearch(
scale=100., sampler="Hammersley", scrambled=True, recommendation_rule="average_of_best").with_name("AvgLargeScrHammersleySearch", register=True)
AvgScrHammersleySearch = SamplingSearch(sampler="Hammersley", scrambled=True,
recommendation_rule="average_of_best").with_name("AvgScrHammersleySearch", register=True)
AvgRescaleScrHammersleySearch = SamplingSearch(
sampler="Hammersley", scrambled=True, rescaled=True, recommendation_rule="average_of_best").with_name("AvgRescaleScrHammersleySearch", register=True)
AvgCauchyScrHammersleySearch = SamplingSearch(
cauchy=True, sampler="Hammersley", scrambled=True, recommendation_rule="average_of_best").with_name("AvgCauchyScrHammersleySearch", register=True)
AvgLHSSearch = SamplingSearch(sampler="LHS", recommendation_rule="average_of_best").with_name("AvgLHSSearch", register=True)
AvgCauchyLHSSearch = SamplingSearch(sampler="LHS", cauchy=True, recommendation_rule="average_of_best").with_name(
"AvgCauchyLHSSearch", register=True)
| [
"numpy.zeros",
"numpy.log",
"numpy.sqrt"
] | [((2628, 2652), 'numpy.zeros', 'np.zeros', (['self.dimension'], {}), '(self.dimension)\n', (2636, 2652), True, 'import numpy as np\n'), ((8502, 8526), 'numpy.zeros', 'np.zeros', (['self.dimension'], {}), '(self.dimension)\n', (8510, 8526), True, 'import numpy as np\n'), ((3084, 3107), 'numpy.sqrt', 'np.sqrt', (['self.dimension'], {}), '(self.dimension)\n', (3091, 3107), True, 'import numpy as np\n'), ((2915, 2934), 'numpy.log', 'np.log', (['self.budget'], {}), '(self.budget)\n', (2921, 2934), True, 'import numpy as np\n'), ((2943, 2965), 'numpy.log', 'np.log', (['self.dimension'], {}), '(self.dimension)\n', (2949, 2965), True, 'import numpy as np\n'), ((9126, 9145), 'numpy.log', 'np.log', (['self.budget'], {}), '(self.budget)\n', (9132, 9145), True, 'import numpy as np\n'), ((9154, 9176), 'numpy.log', 'np.log', (['self.dimension'], {}), '(self.dimension)\n', (9160, 9176), True, 'import numpy as np\n')] |
"""
auralib module containing objects and functions to read data from an auralib
project structure.
Author: <NAME>
Created: 25-Mar-2016
Last Mod: 17-Aug-2016
"""
import os
import numpy as np
def create(projpath):
    """
    Function to create a new empty project directory structure with the
    appropriate files initialized.

    Raises FileExistsError if the project directory already exists.
    """
    # Create the top-level project directory
    os.mkdir(projpath)
    # Create the project sub-directories (parents listed before children)
    subdirs = ['min', 'seis',
               os.path.join('seis', '2D'), os.path.join('seis', '3D'),
               'well', 'wvlt', 'zone']
    for subdir in subdirs:
        os.mkdir(os.path.join(projpath, subdir))
    # Create the initial well_list.txt file
    # NOTE(review): get_well_heads() reads "well_list.csv" while this writes
    # "well_list.txt" -- one of the two filenames is likely wrong; confirm.
    head = "WELL,KB,GRD,X,Y"
    # Context manager guarantees the handle is closed even if the write fails.
    with open(os.path.join(projpath, 'well_list.txt'), 'w') as fd:
        fd.write(head)
def get_well_heads(projpath):
    """
    Function to read the list of well headers into memory.

    NOTE(review): this is still a stub -- it only opens and closes the file
    (effectively checking that it exists) without parsing any rows, and
    create() writes "well_list.txt" while this reads "well_list.csv";
    confirm the intended filename. Behavior (returning None) is preserved.
    """
    fpath = os.path.join(projpath, 'well_list.csv')
    # Context manager so the handle is closed even if an error occurs.
    with open(fpath, 'r'):
        pass
def write_blank_log(filename, nsamp, z0, dz, ltype='Misc'):
    """
    Function to write a blank (all-zero) log.

    filename: output text file path
    nsamp: number of samples to write
    z0: start depth/time of the log
    dz: sample increment
    ltype: log type written to the TYPE header field
    """
    # Reference axis: nsamp samples starting at z0 with step dz
    z1 = nsamp*dz + z0
    zref = np.arange(z0, z1, dz)
    data = zref * 0.0
    # Context manager guarantees the file is closed even if a write fails.
    with open(filename, 'w') as fd:
        # write header
        fd.write('TYPE:%s\n' % ltype)
        fd.write('START:%f\n' % z0)
        fd.write('STEP:%f\n' % dz)
        # write log digits
        fd.writelines('%f\n' % value for value in data)
| [
"os.mkdir",
"os.path.join",
"numpy.arange"
] | [((419, 437), 'os.mkdir', 'os.mkdir', (['projpath'], {}), '(projpath)\n', (427, 437), False, 'import os\n'), ((1123, 1162), 'os.path.join', 'os.path.join', (['projpath', '"""well_list.csv"""'], {}), "(projpath, 'well_list.csv')\n", (1135, 1162), False, 'import os\n'), ((1377, 1398), 'numpy.arange', 'np.arange', (['z0', 'z1', 'dz'], {}), '(z0, z1, dz)\n', (1386, 1398), True, 'import numpy as np\n'), ((500, 529), 'os.path.join', 'os.path.join', (['projpath', '"""min"""'], {}), "(projpath, 'min')\n", (512, 529), False, 'import os\n'), ((545, 575), 'os.path.join', 'os.path.join', (['projpath', '"""seis"""'], {}), "(projpath, 'seis')\n", (557, 575), False, 'import os\n'), ((591, 627), 'os.path.join', 'os.path.join', (['projpath', '"""seis"""', '"""2D"""'], {}), "(projpath, 'seis', '2D')\n", (603, 627), False, 'import os\n'), ((643, 679), 'os.path.join', 'os.path.join', (['projpath', '"""seis"""', '"""3D"""'], {}), "(projpath, 'seis', '3D')\n", (655, 679), False, 'import os\n'), ((695, 725), 'os.path.join', 'os.path.join', (['projpath', '"""well"""'], {}), "(projpath, 'well')\n", (707, 725), False, 'import os\n'), ((741, 771), 'os.path.join', 'os.path.join', (['projpath', '"""wvlt"""'], {}), "(projpath, 'wvlt')\n", (753, 771), False, 'import os\n'), ((787, 817), 'os.path.join', 'os.path.join', (['projpath', '"""zone"""'], {}), "(projpath, 'zone')\n", (799, 817), False, 'import os\n'), ((915, 954), 'os.path.join', 'os.path.join', (['projpath', '"""well_list.txt"""'], {}), "(projpath, 'well_list.txt')\n", (927, 954), False, 'import os\n')] |
import numpy as np
import nnfs
from nnfs.datasets import spiral_data
# Seed numpy and set default dtypes so the results are reproducible.
nnfs.init()
# Fully-connected (dense) layer
class Layer_Dense:

    # Create weights/biases and store the per-layer L1/L2 regularization
    # strengths used later by the loss and the backward pass.
    def __init__(self, n_inputs, n_neurons,
                 weight_regularizer_l1=0, weight_regularizer_l2=0,
                 bias_regularizer_l1=0, bias_regularizer_l2=0):
        # Small random weights, zero biases
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        self.weight_regularizer_l1 = weight_regularizer_l1
        self.weight_regularizer_l2 = weight_regularizer_l2
        self.bias_regularizer_l1 = bias_regularizer_l1
        self.bias_regularizer_l2 = bias_regularizer_l2

    # Forward pass: output = inputs . weights + biases
    def forward(self, inputs):
        self.inputs = inputs  # cached for the backward pass
        self.output = np.dot(inputs, self.weights) + self.biases

    # Backward pass: parameter gradients plus regularization gradients
    def backward(self, dvalues):
        self.dweights = np.dot(self.inputs.T, dvalues)
        self.dbiases = np.sum(dvalues, axis=0, keepdims=True)
        # d|w|/dw is the sign of w
        if self.weight_regularizer_l1 > 0:
            sign_w = np.ones_like(self.weights)
            sign_w[self.weights < 0] = -1
            self.dweights += self.weight_regularizer_l1 * sign_w
        if self.weight_regularizer_l2 > 0:
            self.dweights += 2 * self.weight_regularizer_l2 * self.weights
        if self.bias_regularizer_l1 > 0:
            sign_b = np.ones_like(self.biases)
            sign_b[self.biases < 0] = -1
            self.dbiases += self.bias_regularizer_l1 * sign_b
        if self.bias_regularizer_l2 > 0:
            self.dbiases += 2 * self.bias_regularizer_l2 * self.biases
        # Gradient with respect to the layer inputs
        self.dinputs = np.dot(dvalues, self.weights.T)
# Inverted dropout layer
class Layer_Dropout:

    def __init__(self, rate):
        # Store the keep probability (1 - drop rate), e.g. a drop rate
        # of 0.1 means a success (keep) rate of 0.9.
        self.rate = 1 - rate

    def forward(self, inputs):
        self.inputs = inputs
        # Bernoulli mask, scaled by the keep rate so the expected output
        # magnitude matches the input (inverted dropout)
        self.binary_mask = np.random.binomial(
            1, self.rate, size=inputs.shape) / self.rate
        self.output = inputs * self.binary_mask

    def backward(self, dvalues):
        # Gradient flows only through the kept units
        self.dinputs = dvalues * self.binary_mask
# Rectified linear activation
class Activation_ReLU:

    def forward(self, inputs):
        self.inputs = inputs  # cached for the backward pass
        # max(0, x) elementwise
        self.output = np.maximum(0, inputs)

    def backward(self, dvalues):
        # Work on a copy so the incoming gradient array is untouched
        grad = dvalues.copy()
        # The derivative is zero wherever the input was non-positive
        grad[self.inputs <= 0] = 0
        self.dinputs = grad
# Softmax activation
class Activation_Softmax:

    def forward(self, inputs):
        self.inputs = inputs
        # Subtract the row-wise max before exponentiating: numerically
        # stable and leaves the softmax value unchanged.
        shifted = inputs - np.max(inputs, axis=1, keepdims=True)
        exp_values = np.exp(shifted)
        self.output = exp_values / np.sum(exp_values, axis=1, keepdims=True)

    def backward(self, dvalues):
        self.dinputs = np.empty_like(dvalues)
        for idx, (single_out, single_dval) in enumerate(zip(self.output, dvalues)):
            col = single_out.reshape(-1, 1)
            # Jacobian of softmax: diag(s) - s s^T
            jacobian = np.diagflat(col) - np.dot(col, col.T)
            # Chain rule: sample gradient = J . upstream gradient
            self.dinputs[idx] = np.dot(jacobian, single_dval)
# SGD optimizer (optional learning-rate decay and momentum)
class Optimizer_SGD:

    # Learning rate of 1. is the default for this optimizer
    def __init__(self, learning_rate=1., decay=0., momentum=0.):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.momentum = momentum

    # 1/t learning-rate decay, applied once before each round of updates
    def pre_update_params(self):
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    def update_params(self, layer):
        if self.momentum:
            # Lazily attach per-layer momentum buffers on first use
            if not hasattr(layer, 'weight_momentums'):
                layer.weight_momentums = np.zeros_like(layer.weights)
                layer.bias_momentums = np.zeros_like(layer.biases)
            # New update = retained fraction of previous update minus
            # the gradient step
            weight_updates = (self.momentum * layer.weight_momentums
                              - self.current_learning_rate * layer.dweights)
            bias_updates = (self.momentum * layer.bias_momentums
                            - self.current_learning_rate * layer.dbiases)
            layer.weight_momentums = weight_updates
            layer.bias_momentums = bias_updates
        else:
            # Vanilla SGD step
            weight_updates = -self.current_learning_rate * layer.dweights
            bias_updates = -self.current_learning_rate * layer.dbiases
        layer.weights += weight_updates
        layer.biases += bias_updates

    def post_update_params(self):
        self.iterations += 1
# Adagrad optimizer: per-parameter rates from accumulated squared gradients
class Optimizer_Adagrad:

    def __init__(self, learning_rate=1., decay=0., epsilon=1e-7):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon  # guards against division by zero

    # 1/t learning-rate decay, applied once before each round of updates
    def pre_update_params(self):
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    def update_params(self, layer):
        # Lazily attach per-layer gradient-history caches on first use
        if not hasattr(layer, 'weight_cache'):
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_cache = np.zeros_like(layer.biases)
        # Accumulate squared gradients
        layer.weight_cache += layer.dweights ** 2
        layer.bias_cache += layer.dbiases ** 2
        # SGD step normalized by the square root of the accumulated cache
        layer.weights -= self.current_learning_rate * layer.dweights / \
            (np.sqrt(layer.weight_cache) + self.epsilon)
        layer.biases -= self.current_learning_rate * layer.dbiases / \
            (np.sqrt(layer.bias_cache) + self.epsilon)

    def post_update_params(self):
        self.iterations += 1
# RMSprop optimizer: exponentially decaying cache of squared gradients
class Optimizer_RMSprop:

    def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7,
                 rho=0.9):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon  # guards against division by zero
        self.rho = rho  # cache decay rate

    # 1/t learning-rate decay, applied once before each round of updates
    def pre_update_params(self):
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    def update_params(self, layer):
        # Lazily attach per-layer caches on first use
        if not hasattr(layer, 'weight_cache'):
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_cache = np.zeros_like(layer.biases)
        # Exponential moving average of squared gradients
        layer.weight_cache = self.rho * layer.weight_cache + \
            (1 - self.rho) * layer.dweights ** 2
        layer.bias_cache = self.rho * layer.bias_cache + \
            (1 - self.rho) * layer.dbiases ** 2
        # SGD step normalized by the square root of the cache
        layer.weights -= self.current_learning_rate * layer.dweights / \
            (np.sqrt(layer.weight_cache) + self.epsilon)
        layer.biases -= self.current_learning_rate * layer.dbiases / \
            (np.sqrt(layer.bias_cache) + self.epsilon)

    def post_update_params(self):
        self.iterations += 1
# Adam optimizer: bias-corrected momentum plus RMSprop-style cache
class Optimizer_Adam:

    def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7,
                 beta_1=0.9, beta_2=0.999):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon  # guards against division by zero
        self.beta_1 = beta_1  # first-moment decay rate
        self.beta_2 = beta_2  # second-moment decay rate

    # 1/t learning-rate decay, applied once before each round of updates
    def pre_update_params(self):
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    def update_params(self, layer):
        # Lazily attach per-layer momentum and cache buffers on first use
        if not hasattr(layer, 'weight_cache'):
            layer.weight_momentums = np.zeros_like(layer.weights)
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_momentums = np.zeros_like(layer.biases)
            layer.bias_cache = np.zeros_like(layer.biases)
        b1, b2 = self.beta_1, self.beta_2
        # First moment (momentum) update
        layer.weight_momentums = b1 * layer.weight_momentums + \
            (1 - b1) * layer.dweights
        layer.bias_momentums = b1 * layer.bias_momentums + \
            (1 - b1) * layer.dbiases
        # Bias correction; self.iterations is 0 on the first pass, hence +1
        step = self.iterations + 1
        weight_momentums_corrected = layer.weight_momentums / (1 - b1 ** step)
        bias_momentums_corrected = layer.bias_momentums / (1 - b1 ** step)
        # Second moment (squared-gradient cache) update
        layer.weight_cache = b2 * layer.weight_cache + \
            (1 - b2) * layer.dweights ** 2
        layer.bias_cache = b2 * layer.bias_cache + \
            (1 - b2) * layer.dbiases ** 2
        weight_cache_corrected = layer.weight_cache / (1 - b2 ** step)
        bias_cache_corrected = layer.bias_cache / (1 - b2 ** step)
        # Parameter step normalized by the root of the corrected cache
        layer.weights -= self.current_learning_rate * \
            weight_momentums_corrected / \
            (np.sqrt(weight_cache_corrected) + self.epsilon)
        layer.biases -= self.current_learning_rate * \
            bias_momentums_corrected / \
            (np.sqrt(bias_cache_corrected) + self.epsilon)

    def post_update_params(self):
        self.iterations += 1
# Common loss class
class Loss:

    # Return the L1/L2 penalty contributed by one layer's parameters
    def regularization_loss(self, layer):
        penalty = 0
        # Each term is added only when its strength factor is positive
        if layer.weight_regularizer_l1 > 0:
            penalty += layer.weight_regularizer_l1 * \
                np.sum(np.abs(layer.weights))
        if layer.weight_regularizer_l2 > 0:
            penalty += layer.weight_regularizer_l2 * \
                np.sum(layer.weights * layer.weights)
        if layer.bias_regularizer_l1 > 0:
            penalty += layer.bias_regularizer_l1 * \
                np.sum(np.abs(layer.biases))
        if layer.bias_regularizer_l2 > 0:
            penalty += layer.bias_regularizer_l2 * \
                np.sum(layer.biases * layer.biases)
        return penalty

    # Mean of the per-sample losses produced by the subclass forward()
    def calculate(self, output, y):
        return np.mean(self.forward(output, y))
# Cross-entropy loss
class Loss_CategoricalCrossentropy(Loss):

    def forward(self, y_pred, y_true):
        n_samples = len(y_pred)
        # Clip both sides to avoid log(0) without dragging the mean
        clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
        if len(y_true.shape) == 1:
            # Sparse integer labels: pick the predicted confidence per sample
            confidences = clipped[range(n_samples), y_true]
        elif len(y_true.shape) == 2:
            # One-hot labels: mask out everything but the true class
            confidences = np.sum(clipped * y_true, axis=1)
        # Per-sample negative log-likelihood
        return -np.log(confidences)

    def backward(self, dvalues, y_true):
        n_samples = len(dvalues)
        # Count labels from the first sample
        n_labels = len(dvalues[0])
        # Expand sparse labels to one-hot
        if len(y_true.shape) == 1:
            y_true = np.eye(n_labels)[y_true]
        # Gradient, normalized by the number of samples
        self.dinputs = (-y_true / dvalues) / n_samples
# Softmax classifier - combined Softmax activation
# and cross-entropy loss for faster backward step
class Activation_Softmax_Loss_CategoricalCrossentropy():

    def __init__(self):
        # Compose the two building blocks
        self.activation = Activation_Softmax()
        self.loss = Loss_CategoricalCrossentropy()

    def forward(self, inputs, y_true):
        # Softmax the scores, expose them, then score against the targets
        self.activation.forward(inputs)
        self.output = self.activation.output
        return self.loss.calculate(self.output, y_true)

    def backward(self, dvalues, y_true):
        n_samples = len(dvalues)
        # Collapse one-hot targets to class indices
        if len(y_true.shape) == 2:
            y_true = np.argmax(y_true, axis=1)
        grad = dvalues.copy()
        # Combined softmax + cross-entropy gradient: predicted - target
        grad[range(n_samples), y_true] -= 1
        self.dinputs = grad / n_samples
# Create dataset
X, y = spiral_data(samples=1000, classes=3)
# Create Dense layer with 2 input features and 64 output values
dense1 = Layer_Dense(2, 64, weight_regularizer_l2=5e-4,
bias_regularizer_l2=5e-4)
# Create ReLU activation (to be used with Dense layer):
activation1 = Activation_ReLU()
# Create dropout layer
dropout1 = Layer_Dropout(0.1)
# Create second Dense layer with 64 input features (as we take output
# of previous layer here) and 3 output values (output values)
dense2 = Layer_Dense(64, 3)
# Create Softmax classifier's combined loss and activation
loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()
# Create optimizer
optimizer = Optimizer_Adam(learning_rate=0.05, decay=5e-5)
# Train in loop
for epoch in range(10001):
# Perform a forward pass of our training data through this layer
dense1.forward(X)
# Perform a forward pass through activation function
# takes the output of first dense layer here
activation1.forward(dense1.output)
# Perform a forward pass through Dropout layer
dropout1.forward(activation1.output)
# Perform a forward pass through second Dense layer
# takes outputs of activation function of first layer as inputs
dense2.forward(dropout1.output)
# Perform a forward pass through the activation/loss function
# takes the output of second dense layer here and returns loss
data_loss = loss_activation.forward(dense2.output, y)
# Calculate regularization penalty
regularization_loss = \
loss_activation.loss.regularization_loss(dense1) + \
loss_activation.loss.regularization_loss(dense2)
# Calculate overall loss
loss = data_loss + regularization_loss
# Calculate accuracy from output of activation2 and targets
# calculate values along first axis
predictions = np.argmax(loss_activation.output, axis=1)
if len(y.shape) == 2:
y = np.argmax(y, axis=1)
accuracy = np.mean(predictions==y)
if not epoch % 100:
print(f'epoch: {epoch}, ' +
f'acc: {accuracy:.3f}, ' +
f'loss: {loss:.3f} (' +
f'data_loss: {data_loss:.3f}, ' +
f'reg_loss: {regularization_loss:.3f}), ' +
f'lr: {optimizer.current_learning_rate}')
# Backward pass
loss_activation.backward(loss_activation.output, y)
dense2.backward(loss_activation.dinputs)
dropout1.backward(dense2.dinputs)
activation1.backward(dropout1.dinputs)
dense1.backward(activation1.dinputs)
# Update weights and biases
optimizer.pre_update_params()
optimizer.update_params(dense1)
optimizer.update_params(dense2)
optimizer.post_update_params()
# Validate the model
# Create test dataset
X_test, y_test = spiral_data(samples=100, classes=3)
# Perform a forward pass of our testing data through this layer
dense1.forward(X_test)
# Perform a forward pass through activation function
# takes the output of first dense layer here
activation1.forward(dense1.output)
# Perform a forward pass through second Dense layer
# takes outputs of activation function of first layer as inputs
dense2.forward(activation1.output)
# Perform a forward pass through the activation/loss function
# takes the output of second dense layer here and returns loss
loss = loss_activation.forward(dense2.output, y_test)
# Calculate accuracy from output of activation2 and targets
# calculate values along first axis
predictions = np.argmax(loss_activation.output, axis=1)
if len(y_test.shape) == 2:
y_test = np.argmax(y_test, axis=1)
accuracy = np.mean(predictions==y_test)
print(f'validation, acc: {accuracy:.3f}, loss: {loss:.3f}')
'''
>>>
...
epoch: 9900, acc: 0.668, loss: 0.733 (data_loss: 0.717, reg_loss: 0.016), lr: 0.0334459346466437
epoch: 10000, acc: 0.688, loss: 0.727 (data_loss: 0.711, reg_loss: 0.016), lr: 0.03333444448148271
validation, acc: 0.757, loss: 0.712
'''
| [
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.argmax",
"numpy.diagflat",
"numpy.clip",
"numpy.mean",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.empty_like",
"numpy.max",
"numpy.ones_like",
"numpy.random.binomial",
"numpy.dot",
"numpy.log",
"nnfs.init",
"nnfs.datasets.spiral... | [((70, 81), 'nnfs.init', 'nnfs.init', ([], {}), '()\n', (79, 81), False, 'import nnfs\n'), ((17482, 17518), 'nnfs.datasets.spiral_data', 'spiral_data', ([], {'samples': '(1000)', 'classes': '(3)'}), '(samples=1000, classes=3)\n', (17493, 17518), False, 'from nnfs.datasets import spiral_data\n'), ((20247, 20282), 'nnfs.datasets.spiral_data', 'spiral_data', ([], {'samples': '(100)', 'classes': '(3)'}), '(samples=100, classes=3)\n', (20258, 20282), False, 'from nnfs.datasets import spiral_data\n'), ((20948, 20989), 'numpy.argmax', 'np.argmax', (['loss_activation.output'], {'axis': '(1)'}), '(loss_activation.output, axis=1)\n', (20957, 20989), True, 'import numpy as np\n'), ((21067, 21097), 'numpy.mean', 'np.mean', (['(predictions == y_test)'], {}), '(predictions == y_test)\n', (21074, 21097), True, 'import numpy as np\n'), ((19316, 19357), 'numpy.argmax', 'np.argmax', (['loss_activation.output'], {'axis': '(1)'}), '(loss_activation.output, axis=1)\n', (19325, 19357), True, 'import numpy as np\n'), ((19432, 19457), 'numpy.mean', 'np.mean', (['(predictions == y)'], {}), '(predictions == y)\n', (19439, 19457), True, 'import numpy as np\n'), ((21030, 21055), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (21039, 21055), True, 'import numpy as np\n'), ((449, 473), 'numpy.zeros', 'np.zeros', (['(1, n_neurons)'], {}), '((1, n_neurons))\n', (457, 473), True, 'import numpy as np\n'), ((1096, 1126), 'numpy.dot', 'np.dot', (['self.inputs.T', 'dvalues'], {}), '(self.inputs.T, dvalues)\n', (1102, 1126), True, 'import numpy as np\n'), ((1150, 1188), 'numpy.sum', 'np.sum', (['dvalues'], {'axis': '(0)', 'keepdims': '(True)'}), '(dvalues, axis=0, keepdims=True)\n', (1156, 1188), True, 'import numpy as np\n'), ((2037, 2068), 'numpy.dot', 'np.dot', (['dvalues', 'self.weights.T'], {}), '(dvalues, self.weights.T)\n', (2043, 2068), True, 'import numpy as np\n'), ((2988, 3009), 'numpy.maximum', 'np.maximum', (['(0)', 'inputs'], 
{}), '(0, inputs)\n', (2998, 3009), True, 'import numpy as np\n'), ((3940, 3962), 'numpy.empty_like', 'np.empty_like', (['dvalues'], {}), '(dvalues)\n', (3953, 3962), True, 'import numpy as np\n'), ((14752, 14774), 'numpy.mean', 'np.mean', (['sample_losses'], {}), '(sample_losses)\n', (14759, 14774), True, 'import numpy as np\n'), ((15149, 15182), 'numpy.clip', 'np.clip', (['y_pred', '(1e-07)', '(1 - 1e-07)'], {}), '(y_pred, 1e-07, 1 - 1e-07)\n', (15156, 15182), True, 'import numpy as np\n'), ((19396, 19416), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (19405, 19416), True, 'import numpy as np\n'), ((390, 426), 'numpy.random.randn', 'np.random.randn', (['n_inputs', 'n_neurons'], {}), '(n_inputs, n_neurons)\n', (405, 426), True, 'import numpy as np\n'), ((941, 969), 'numpy.dot', 'np.dot', (['inputs', 'self.weights'], {}), '(inputs, self.weights)\n', (947, 969), True, 'import numpy as np\n'), ((1313, 1339), 'numpy.ones_like', 'np.ones_like', (['self.weights'], {}), '(self.weights)\n', (1325, 1339), True, 'import numpy as np\n'), ((1696, 1721), 'numpy.ones_like', 'np.ones_like', (['self.biases'], {}), '(self.biases)\n', (1708, 1721), True, 'import numpy as np\n'), ((2455, 2506), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.rate'], {'size': 'inputs.shape'}), '(1, self.rate, size=inputs.shape)\n', (2473, 2506), True, 'import numpy as np\n'), ((3702, 3743), 'numpy.sum', 'np.sum', (['exp_values'], {'axis': '(1)', 'keepdims': '(True)'}), '(exp_values, axis=1, keepdims=True)\n', (3708, 3743), True, 'import numpy as np\n'), ((4529, 4568), 'numpy.dot', 'np.dot', (['jacobian_matrix', 'single_dvalues'], {}), '(jacobian_matrix, single_dvalues)\n', (4535, 4568), True, 'import numpy as np\n'), ((7692, 7720), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (7705, 7720), True, 'import numpy as np\n'), ((7752, 7779), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (7765, 
7779), True, 'import numpy as np\n'), ((9309, 9337), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (9322, 9337), True, 'import numpy as np\n'), ((9369, 9396), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (9382, 9396), True, 'import numpy as np\n'), ((11098, 11126), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (11111, 11126), True, 'import numpy as np\n'), ((11160, 11188), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (11173, 11188), True, 'import numpy as np\n'), ((11224, 11251), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (11237, 11251), True, 'import numpy as np\n'), ((11283, 11310), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (11296, 11310), True, 'import numpy as np\n'), ((15685, 15712), 'numpy.log', 'np.log', (['correct_confidences'], {}), '(correct_confidences)\n', (15691, 15712), True, 'import numpy as np\n'), ((17198, 17223), 'numpy.argmax', 'np.argmax', (['y_true'], {'axis': '(1)'}), '(y_true, axis=1)\n', (17207, 17223), True, 'import numpy as np\n'), ((3541, 3578), 'numpy.max', 'np.max', (['inputs'], {'axis': '(1)', 'keepdims': '(True)'}), '(inputs, axis=1, keepdims=True)\n', (3547, 3578), True, 'import numpy as np\n'), ((4292, 4318), 'numpy.diagflat', 'np.diagflat', (['single_output'], {}), '(single_output)\n', (4303, 4318), True, 'import numpy as np\n'), ((4353, 4391), 'numpy.dot', 'np.dot', (['single_output', 'single_output.T'], {}), '(single_output, single_output.T)\n', (4359, 4391), True, 'import numpy as np\n'), ((5536, 5564), 'numpy.zeros_like', 'np.zeros_like', (['layer.weights'], {}), '(layer.weights)\n', (5549, 5564), True, 'import numpy as np\n'), ((5729, 5756), 'numpy.zeros_like', 'np.zeros_like', (['layer.biases'], {}), '(layer.biases)\n', (5742, 5756), True, 'import numpy as np\n'), ((8146, 8173), 'numpy.sqrt', 'np.sqrt', 
(['layer.weight_cache'], {}), '(layer.weight_cache)\n', (8153, 8173), True, 'import numpy as np\n'), ((8313, 8338), 'numpy.sqrt', 'np.sqrt', (['layer.bias_cache'], {}), '(layer.bias_cache)\n', (8320, 8338), True, 'import numpy as np\n'), ((9885, 9912), 'numpy.sqrt', 'np.sqrt', (['layer.weight_cache'], {}), '(layer.weight_cache)\n', (9892, 9912), True, 'import numpy as np\n'), ((10052, 10077), 'numpy.sqrt', 'np.sqrt', (['layer.bias_cache'], {}), '(layer.bias_cache)\n', (10059, 10077), True, 'import numpy as np\n'), ((12821, 12852), 'numpy.sqrt', 'np.sqrt', (['weight_cache_corrected'], {}), '(weight_cache_corrected)\n', (12828, 12852), True, 'import numpy as np\n'), ((13034, 13063), 'numpy.sqrt', 'np.sqrt', (['bias_cache_corrected'], {}), '(bias_cache_corrected)\n', (13041, 13063), True, 'import numpy as np\n'), ((13840, 13877), 'numpy.sum', 'np.sum', (['(layer.weights * layer.weights)'], {}), '(layer.weights * layer.weights)\n', (13846, 13877), True, 'import numpy as np\n'), ((14364, 14399), 'numpy.sum', 'np.sum', (['(layer.biases * layer.biases)'], {}), '(layer.biases * layer.biases)\n', (14370, 14399), True, 'import numpy as np\n'), ((15545, 15584), 'numpy.sum', 'np.sum', (['(y_pred_clipped * y_true)'], {'axis': '(1)'}), '(y_pred_clipped * y_true, axis=1)\n', (15551, 15584), True, 'import numpy as np\n'), ((16121, 16135), 'numpy.eye', 'np.eye', (['labels'], {}), '(labels)\n', (16127, 16135), True, 'import numpy as np\n'), ((13632, 13653), 'numpy.abs', 'np.abs', (['layer.weights'], {}), '(layer.weights)\n', (13638, 13653), True, 'import numpy as np\n'), ((14162, 14182), 'numpy.abs', 'np.abs', (['layer.biases'], {}), '(layer.biases)\n', (14168, 14182), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
# Subdirectory
# LIDAR-DTM-2M-SJ46
# Filenames like:
# sj4060_DTM_2m.asc,
# sj4{0-9}{60-69}_DTM_2m.asc
#ncols 500
#nrows 500
#xllcorner 340000
#yllcorner 360000
#cellsize 2
#NODATA_value -9999
from collections import OrderedDict
import json
import sys
import zipfile
import os
import numpy as np
from matplotlib import pyplot as plt
from bng_to_latlon import OSGB36toWGS84
import matplotlib
LOGLINE_TEMPLATE = OrderedDict([
('name', None),
('nrows', None),
('ncols', None),
('xllcorner', None),
('yllcorner', None),
('cellsize', None),
('NODATA_value', None),
])
# Primary grid: (S, T), (N, O), H going North 500km x 500km
# Secondary grid: A-Z (omitting I, 5x5) 100km x 100km
PRIMARY = {
"H": {"xorg": 0, "yorg":1400000},
"N": {"xorg": 0, "yorg":900000},
"O": {"xorg": 500000, "yorg":900000},
"S": {"xorg": 0, "yorg": 400000},
"T": {"xorg": 500000, "yorg": 400000},
}
SECONDARY = {
"A": {"xorg": 0, "yorg": 0},
"B": {"xorg": 100000, "yorg": 0},
"C": {"xorg": 200000, "yorg": 0},
"D": {"xorg": 300000, "yorg": 0},
"E": {"xorg": 400000, "yorg": 0},
"F": {"xorg": 0, "yorg": 100000},
"G": {"xorg": 100000, "yorg": 100000},
"H": {"xorg": 200000, "yorg": 100000},
"J": {"xorg": 300000, "yorg": 100000},
"K": {"xorg": 400000, "yorg": 100000},
"L": {"xorg": 0, "yorg": 200000},
"M": {"xorg": 100000, "yorg": 200000},
"N": {"xorg": 200000, "yorg": 200000},
"O": {"xorg": 300000, "yorg": 200000},
"P": {"xorg": 400000, "yorg": 200000},
"Q": {"xorg": 0, "yorg": 300000},
"R": {"xorg": 100000, "yorg":300000},
"S": {"xorg": 200000, "yorg":300000},
"T": {"xorg": 300000, "yorg":300000},
"U": {"xorg": 400000, "yorg":300000},
"V": {"xorg": 0, "yorg": 400000},
"W": {"xorg": 100000, "yorg": 400000},
"X": {"xorg": 200000, "yorg": 400000},
"Y": {"xorg": 300000, "yorg": 400000},
"Z": {"xorg": 400000, "yorg": 400000},
}
DATA_ROOT_DIR = "C:\\BigData\\defra-lidar\\"
if not os.path.exists(DATA_ROOT_DIR):
DATA_ROOT_DIR = os.getcwd()
DATA_DIR_TEMPLATE = os.path.join(DATA_ROOT_DIR, "LIDAR-DSM-2M-{OS_grid_cell}.zip")
DATA_FILE = ""
OS_GRID_SIZE = 10000.0
def main(argv=None):
global DATA_FILE
# Process commandline arguments
# If we were just going to process one tile then here would be the place to start
# Return a file list from process_arguments, adjust OS_GRID_SIZE, return os_grid_cell
DATA_FILE, os_grid_cell, name = process_arguments(argv)
# Report on datafiles
zf = zipfile.ZipFile(DATA_FILE, 'r')
datafiles = zf.namelist()
print("Data zip file: {}".format(DATA_FILE))
print("Found {} datafiles".format(len(datafiles)))
xorg, yorg = tile_origin(os_grid_cell)
# Calculate bounding box
lat_ll, lng_ll = OSGB36toWGS84(xorg, yorg) # lower left
lat_ur, lng_ur = OSGB36toWGS84(xorg + OS_GRID_SIZE, yorg + OS_GRID_SIZE)
bb = "[{}, {}], [{}, {}]".format(lat_ll, lng_ll, lat_ur, lng_ur)
print("Bounding box: {}".format(bb))
# Write bounding box to data_dict
write_to_data_dict(os_grid_cell, bb, name)
# Iterate over datafiles
filelist = [x for x in range(len(datafiles))]
# Assume all tiles in a dataset have the same ncols, nrows, cellsize and NODATA_value
metadata = get_header_info(datafiles[0])
# Output_data_size is OS_GRID_SIZE/cellsize
output_data_size = OS_GRID_SIZE / metadata["cellsize"]
bigdata = np.zeros((output_data_size, output_data_size), dtype=np.float)
for idx in filelist:
# Get data
metadata = get_header_info(datafiles[idx])
data = get_image(datafiles[idx], metadata["ncols"], metadata["nrows"])
data[data == -9999] = 0.0
# Calculate x,y offset
xoffset, yoffset = calculate_offsets(metadata, output_data_size, xorg, yorg)
width = metadata["ncols"]
height = metadata["nrows"]
# Write into array
bigdata[yoffset - height:yoffset, xoffset:xoffset + width] = data
# Show the data
plot_image(bigdata)
# Export the data to an image
filename = "images/" + os_grid_cell
matplotlib.image.imsave(filename, bigdata, cmap=plt.gray())
def write_to_data_dict(os_grid_cell, bb, name):
try:
with open('data_dict.json') as data_file:
data_dict = json.load(data_file)
except:
data_dict = {}
if os_grid_cell in data_dict.keys():
data_dict[os_grid_cell]["bb"] = bb
data_dict[os_grid_cell]["name"] = name
else:
data_dict[os_grid_cell] = {"bb": bb, "name": name}
with open('data_dict.json', 'w') as outfile:
json.dump(data_dict, outfile, sort_keys=True, indent=4)
def process_arguments(argv):
if argv is None:
argv = sys.argv
arg = argv[1:]
DATA_FILE = ""
os_grid_cell = ''
name = "None"
os_grid_cell = arg[0]
# If the first argument is short it's assumed to be of the form SJ46
# And that we are asking for a directory like LIDAR-DSM-2M-{OS_grid_cell}
# Otherwise we assume we are being given the full directory name
if len(arg[0]) == 4:
DATA_FILE = DATA_DIR_TEMPLATE.format(OS_grid_cell=os_grid_cell)
else:
DATA_FILE = DATA_ROOT_DIR + arg[0]
os_grid_cell = arg[0][-4:]
# If there is a second argument then it is a friendly name
if len(arg) == 2:
name = arg[1]
return DATA_FILE, os_grid_cell, name
def list_available_data():
print("Lookin' for data!")
def tile_origin(tile_code):
xorg = PRIMARY[tile_code[0]]["xorg"] + SECONDARY[tile_code[1]]["xorg"] + float(tile_code[2]) * OS_GRID_SIZE
yorg = PRIMARY[tile_code[0]]["yorg"] - SECONDARY[tile_code[1]]["yorg"] + float(tile_code[3]) * OS_GRID_SIZE
return xorg, yorg
def calculate_offsets(metadata, output_data_size, xorg, yorg):
xoffset = (metadata["xllcorner"] - xorg) / float(metadata["cellsize"])
yoffset = output_data_size - (metadata["yllcorner"] - yorg) / float(metadata["cellsize"])
return xoffset, yoffset
def plot_image(data):
plt.imshow(data, interpolation='nearest', cmap=plt.gray())
plt.axis('off')
plt.margins(0, 0, tight=True)
plt.show()
def get_image(filename, ncols, nrows):
data = np.zeros((ncols, nrows), dtype=np.float)
zf = zipfile.ZipFile(DATA_FILE)
with zf.open(filename) as f:
content = f.readlines()
idx = 0
for line in content:
line = line.decode("utf-8")
parts = line.split()
if len(parts) == ncols:
data[idx,] = [float(x) for x in parts]
idx = idx + 1
return data
def get_header_info(filename):
log_line = LOGLINE_TEMPLATE.copy()
zf = zipfile.ZipFile(DATA_FILE)
with zf.open(filename) as f:
content = [next(f) for x in range(7)]
log_line["name"] = filename
for line in content:
line = line.decode("utf-8")
parts = line.split()
#assert len(parts) in [1,2]
if len(parts) != 2:
break
elif len(parts) == 2:
if parts[0] == "nrows":
log_line["nrows"] = int(parts[1])
elif parts[0] == "ncols":
log_line["ncols"] = int(parts[1])
elif parts[0] == "xllcorner":
log_line["xllcorner"] = float(parts[1])
elif parts[0] == "yllcorner":
log_line["yllcorner"] = float(parts[1])
elif parts[0] == "cellsize":
log_line["cellsize"] = float(parts[1])
elif parts[0] == "NODATA_value":
log_line["NODATA_value"] = int(parts[1])
else:
print("Keyword not recognised: {}".format(parts[0]))
else:
print("Unexpected line length (not 2 or 500): {}".format(len(parts)))
return log_line
if __name__ == "__main__":
main()
| [
"json.dump",
"matplotlib.pyplot.gray",
"json.load",
"zipfile.ZipFile",
"matplotlib.pyplot.show",
"os.getcwd",
"matplotlib.pyplot.margins",
"os.path.exists",
"numpy.zeros",
"matplotlib.pyplot.axis",
"collections.OrderedDict",
"os.path.join",
"bng_to_latlon.OSGB36toWGS84"
] | [((484, 637), 'collections.OrderedDict', 'OrderedDict', (["[('name', None), ('nrows', None), ('ncols', None), ('xllcorner', None), (\n 'yllcorner', None), ('cellsize', None), ('NODATA_value', None)]"], {}), "([('name', None), ('nrows', None), ('ncols', None), ('xllcorner',\n None), ('yllcorner', None), ('cellsize', None), ('NODATA_value', None)])\n", (495, 637), False, 'from collections import OrderedDict\n'), ((2298, 2360), 'os.path.join', 'os.path.join', (['DATA_ROOT_DIR', '"""LIDAR-DSM-2M-{OS_grid_cell}.zip"""'], {}), "(DATA_ROOT_DIR, 'LIDAR-DSM-2M-{OS_grid_cell}.zip')\n", (2310, 2360), False, 'import os\n'), ((2215, 2244), 'os.path.exists', 'os.path.exists', (['DATA_ROOT_DIR'], {}), '(DATA_ROOT_DIR)\n', (2229, 2244), False, 'import os\n'), ((2266, 2277), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2275, 2277), False, 'import os\n'), ((2751, 2782), 'zipfile.ZipFile', 'zipfile.ZipFile', (['DATA_FILE', '"""r"""'], {}), "(DATA_FILE, 'r')\n", (2766, 2782), False, 'import zipfile\n'), ((3011, 3036), 'bng_to_latlon.OSGB36toWGS84', 'OSGB36toWGS84', (['xorg', 'yorg'], {}), '(xorg, yorg)\n', (3024, 3036), False, 'from bng_to_latlon import OSGB36toWGS84\n'), ((3071, 3126), 'bng_to_latlon.OSGB36toWGS84', 'OSGB36toWGS84', (['(xorg + OS_GRID_SIZE)', '(yorg + OS_GRID_SIZE)'], {}), '(xorg + OS_GRID_SIZE, yorg + OS_GRID_SIZE)\n', (3084, 3126), False, 'from bng_to_latlon import OSGB36toWGS84\n'), ((3662, 3724), 'numpy.zeros', 'np.zeros', (['(output_data_size, output_data_size)'], {'dtype': 'np.float'}), '((output_data_size, output_data_size), dtype=np.float)\n', (3670, 3724), True, 'import numpy as np\n'), ((6345, 6360), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6353, 6360), True, 'from matplotlib import pyplot as plt\n'), ((6365, 6394), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {'tight': '(True)'}), '(0, 0, tight=True)\n', (6376, 6394), True, 'from matplotlib import pyplot as plt\n'), ((6399, 6409), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6407, 6409), True, 'from matplotlib import pyplot as plt\n'), ((6461, 6501), 'numpy.zeros', 'np.zeros', (['(ncols, nrows)'], {'dtype': 'np.float'}), '((ncols, nrows), dtype=np.float)\n', (6469, 6501), True, 'import numpy as np\n'), ((6511, 6537), 'zipfile.ZipFile', 'zipfile.ZipFile', (['DATA_FILE'], {}), '(DATA_FILE)\n', (6526, 6537), False, 'import zipfile\n'), ((6938, 6964), 'zipfile.ZipFile', 'zipfile.ZipFile', (['DATA_FILE'], {}), '(DATA_FILE)\n', (6953, 6964), False, 'import zipfile\n'), ((4863, 4918), 'json.dump', 'json.dump', (['data_dict', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(data_dict, outfile, sort_keys=True, indent=4)\n', (4872, 4918), False, 'import json\n'), ((4399, 4409), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (4407, 4409), True, 'from matplotlib import pyplot as plt\n'), ((4547, 4567), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (4556, 4567), False, 'import json\n'), ((6329, 6339), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (6337, 6339), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
import random
import matplotlib.pyplot as plt
def random_obstacle(x, y, max_size):
obstacle_size = random.randint(5,max_size)
obstacle_occupancy = [(x,y)]
for i in range(obstacle_size):
x += random.choice([1, 0, -1])
y += random.choice([1, 0, -1])
obstacle_occupancy.append((x,y))
return obstacle_occupancy
def random_maze(x_dimension, y_dimension, density):
oc_grid = []
if density == 'heavy':
num_obstacles = int( np.sqrt(x_dimension * y_dimension) )
max_obstacle_size = int( np.sqrt(x_dimension * y_dimension) )
elif density == 'medium':
num_obstacles = int( 0.75 * np.sqrt(x_dimension * y_dimension) )
max_obstacle_size = int( np.sqrt(x_dimension * y_dimension) )
elif density == 'light':
num_obstacles = int( 0.5 * np.sqrt(x_dimension * y_dimension) )
max_obstacle_size = int( np.sqrt(x_dimension * y_dimension) )
elif density == 'sparse':
num_obstacles = int( 0.25 * np.sqrt(x_dimension * y_dimension) )
max_obstacle_size = int( np.sqrt(x_dimension * y_dimension) )
start = (0,0)
end = (x_dimension - 1, y_dimension - 1)
for i in range(num_obstacles):
x = random.randint(1, x_dimension - 2)
y = random.randint(1, y_dimension - 2)
for i in random_obstacle(x,y, max_obstacle_size):
if 0 <= i[0] <= x_dimension - 2 and 0 <= i[1] <= y_dimension - 2:
oc_grid.append(i)
'''
Start and End positions are either in the corner or centre of the edge of the maze
Start and End are always on opposite edges of the maze
'''
waypoints = [0, int(x_dimension/2), x_dimension - 1]
start = (random.choice(waypoints), 0)
if start[0] != int(x_dimension/2): #Prevent the maze from generating start and end coordinates along the edge of the maze
del waypoints[waypoints.index(start[0])]
end = (random.choice(waypoints), y_dimension - 1)
return oc_grid, start, end
| [
"random.choice",
"random.randint",
"numpy.sqrt"
] | [((124, 151), 'random.randint', 'random.randint', (['(5)', 'max_size'], {}), '(5, max_size)\n', (138, 151), False, 'import random\n'), ((232, 257), 'random.choice', 'random.choice', (['[1, 0, -1]'], {}), '([1, 0, -1])\n', (245, 257), False, 'import random\n'), ((271, 296), 'random.choice', 'random.choice', (['[1, 0, -1]'], {}), '([1, 0, -1])\n', (284, 296), False, 'import random\n'), ((1232, 1266), 'random.randint', 'random.randint', (['(1)', '(x_dimension - 2)'], {}), '(1, x_dimension - 2)\n', (1246, 1266), False, 'import random\n'), ((1279, 1313), 'random.randint', 'random.randint', (['(1)', '(y_dimension - 2)'], {}), '(1, y_dimension - 2)\n', (1293, 1313), False, 'import random\n'), ((1716, 1740), 'random.choice', 'random.choice', (['waypoints'], {}), '(waypoints)\n', (1729, 1740), False, 'import random\n'), ((1932, 1956), 'random.choice', 'random.choice', (['waypoints'], {}), '(waypoints)\n', (1945, 1956), False, 'import random\n'), ((496, 530), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (503, 530), True, 'import numpy as np\n'), ((566, 600), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (573, 600), True, 'import numpy as np\n'), ((739, 773), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (746, 773), True, 'import numpy as np\n'), ((669, 703), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (676, 703), True, 'import numpy as np\n'), ((910, 944), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (917, 944), True, 'import numpy as np\n'), ((840, 874), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (847, 874), True, 'import numpy as np\n'), ((1083, 1117), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (1090, 1117), 
True, 'import numpy as np\n'), ((1013, 1047), 'numpy.sqrt', 'np.sqrt', (['(x_dimension * y_dimension)'], {}), '(x_dimension * y_dimension)\n', (1020, 1047), True, 'import numpy as np\n')] |
"""Test Gaussian MLP Policy."""
import pickle
import numpy as np
import pytest
import torch
from torch import nn
from garage.envs import GarageEnv
from garage.torch.policies import GaussianMLPPolicy
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestGaussianMLPPolicies:
"""Class for Testing Gaussian MlP Policy."""
# yapf: disable
@pytest.mark.parametrize('hidden_sizes', [
(1, ), (2, ), (3, ), (1, 4), (3, 5)])
# yapf: enable
def test_get_action(self, hidden_sizes):
"""Test get_action function."""
env_spec = GarageEnv(DummyBoxEnv())
obs_dim = env_spec.observation_space.flat_dim
act_dim = env_spec.action_space.flat_dim
obs = torch.ones(obs_dim, dtype=torch.float32)
init_std = 2.
policy = GaussianMLPPolicy(env_spec=env_spec,
hidden_sizes=hidden_sizes,
init_std=init_std,
hidden_nonlinearity=None,
std_parameterization='exp',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
dist = policy(obs)[0]
expected_mean = torch.full(
(act_dim, ), obs_dim * (torch.Tensor(hidden_sizes).prod().item()))
expected_variance = init_std**2
action, prob = policy.get_action(obs)
assert np.array_equal(prob['mean'], expected_mean.numpy())
assert dist.variance.equal(torch.full((act_dim, ), expected_variance))
assert action.shape == (act_dim, )
# yapf: disable
@pytest.mark.parametrize('hidden_sizes', [
(1, ), (2, ), (3, ), (1, 4), (3, 5)])
# yapf: enable
def test_get_action_np(self, hidden_sizes):
"""Test get_action function with numpy inputs."""
env_spec = GarageEnv(DummyBoxEnv())
obs_dim = env_spec.observation_space.flat_dim
act_dim = env_spec.action_space.flat_dim
obs = np.ones(obs_dim, dtype=np.float32)
init_std = 2.
policy = GaussianMLPPolicy(env_spec=env_spec,
hidden_sizes=hidden_sizes,
init_std=init_std,
hidden_nonlinearity=None,
std_parameterization='exp',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
dist = policy(torch.from_numpy(obs))[0]
expected_mean = torch.full(
(act_dim, ), obs_dim * (torch.Tensor(hidden_sizes).prod().item()))
expected_variance = init_std**2
action, prob = policy.get_action(obs)
assert np.array_equal(prob['mean'], expected_mean.numpy())
assert dist.variance.equal(torch.full((act_dim, ), expected_variance))
assert action.shape == (act_dim, )
# yapf: disable
@pytest.mark.parametrize('batch_size, hidden_sizes', [
(1, (1, )),
(5, (3, )),
(8, (4, )),
(15, (1, 2)),
(30, (3, 4, 10)),
])
# yapf: enable
def test_get_actions(self, batch_size, hidden_sizes):
"""Test get_actions function."""
env_spec = GarageEnv(DummyBoxEnv())
obs_dim = env_spec.observation_space.flat_dim
act_dim = env_spec.action_space.flat_dim
obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)
init_std = 2.
policy = GaussianMLPPolicy(env_spec=env_spec,
hidden_sizes=hidden_sizes,
init_std=init_std,
hidden_nonlinearity=None,
std_parameterization='exp',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
dist = policy(obs)[0]
expected_mean = torch.full([batch_size, act_dim],
obs_dim *
(torch.Tensor(hidden_sizes).prod().item()))
expected_variance = init_std**2
action, prob = policy.get_actions(obs)
assert np.array_equal(prob['mean'], expected_mean.numpy())
assert dist.variance.equal(
torch.full((batch_size, act_dim), expected_variance))
assert action.shape == (batch_size, act_dim)
# yapf: disable
@pytest.mark.parametrize('batch_size, hidden_sizes', [
(1, (1, )),
(5, (3, )),
(8, (4, )),
(15, (1, 2)),
(30, (3, 4, 10)),
])
# yapf: enable
def test_get_actions_np(self, batch_size, hidden_sizes):
"""Test get_actions function with numpy inputs."""
env_spec = GarageEnv(DummyBoxEnv())
obs_dim = env_spec.observation_space.flat_dim
act_dim = env_spec.action_space.flat_dim
obs = np.ones((batch_size, obs_dim), dtype=np.float32)
init_std = 2.
policy = GaussianMLPPolicy(env_spec=env_spec,
hidden_sizes=hidden_sizes,
init_std=init_std,
hidden_nonlinearity=None,
std_parameterization='exp',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
dist = policy(torch.from_numpy(obs))[0]
expected_mean = torch.full([batch_size, act_dim],
obs_dim *
(torch.Tensor(hidden_sizes).prod().item()))
expected_variance = init_std**2
action, prob = policy.get_actions(obs)
assert np.array_equal(prob['mean'], expected_mean.numpy())
assert dist.variance.equal(
torch.full((batch_size, act_dim), expected_variance))
assert action.shape == (batch_size, act_dim)
# yapf: disable
@pytest.mark.parametrize('batch_size, hidden_sizes', [
(1, (1, )),
(6, (3, )),
(11, (6, )),
(25, (3, 5)),
(34, (2, 10, 11)),
])
# yapf: enable
def test_is_pickleable(self, batch_size, hidden_sizes):
"""Test if policy is pickleable."""
env_spec = GarageEnv(DummyBoxEnv())
obs_dim = env_spec.observation_space.flat_dim
obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)
init_std = 2.
policy = GaussianMLPPolicy(env_spec=env_spec,
hidden_sizes=hidden_sizes,
init_std=init_std,
hidden_nonlinearity=None,
std_parameterization='exp',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
output1_action, output1_prob = policy.get_actions(obs)
p = pickle.dumps(policy)
policy_pickled = pickle.loads(p)
output2_action, output2_prob = policy_pickled.get_actions(obs)
assert np.array_equal(output1_prob['mean'], output2_prob['mean'])
assert output1_action.shape == output2_action.shape
| [
"pickle.loads",
"torch.ones",
"torch.from_numpy",
"numpy.ones",
"torch.full",
"torch.Tensor",
"garage.torch.policies.GaussianMLPPolicy",
"numpy.array_equal",
"pytest.mark.parametrize",
"tests.fixtures.envs.dummy.DummyBoxEnv",
"pickle.dumps"
] | [((359, 434), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hidden_sizes"""', '[(1,), (2,), (3,), (1, 4), (3, 5)]'], {}), "('hidden_sizes', [(1,), (2,), (3,), (1, 4), (3, 5)])\n", (382, 434), False, 'import pytest\n'), ((1647, 1722), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hidden_sizes"""', '[(1,), (2,), (3,), (1, 4), (3, 5)]'], {}), "('hidden_sizes', [(1,), (2,), (3,), (1, 4), (3, 5)])\n", (1670, 1722), False, 'import pytest\n'), ((2968, 3090), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size, hidden_sizes"""', '[(1, (1,)), (5, (3,)), (8, (4,)), (15, (1, 2)), (30, (3, 4, 10))]'], {}), "('batch_size, hidden_sizes', [(1, (1,)), (5, (3,)),\n (8, (4,)), (15, (1, 2)), (30, (3, 4, 10))])\n", (2991, 3090), False, 'import pytest\n'), ((4466, 4588), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size, hidden_sizes"""', '[(1, (1,)), (5, (3,)), (8, (4,)), (15, (1, 2)), (30, (3, 4, 10))]'], {}), "('batch_size, hidden_sizes', [(1, (1,)), (5, (3,)),\n (8, (4,)), (15, (1, 2)), (30, (3, 4, 10))])\n", (4489, 4588), False, 'import pytest\n'), ((5997, 6121), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size, hidden_sizes"""', '[(1, (1,)), (6, (3,)), (11, (6,)), (25, (3, 5)), (34, (2, 10, 11))]'], {}), "('batch_size, hidden_sizes', [(1, (1,)), (6, (3,)),\n (11, (6,)), (25, (3, 5)), (34, (2, 10, 11))])\n", (6020, 6121), False, 'import pytest\n'), ((712, 752), 'torch.ones', 'torch.ones', (['obs_dim'], {'dtype': 'torch.float32'}), '(obs_dim, dtype=torch.float32)\n', (722, 752), False, 'import torch\n'), ((793, 996), 'garage.torch.policies.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'env_spec': 'env_spec', 'hidden_sizes': 'hidden_sizes', 'init_std': 'init_std', 'hidden_nonlinearity': 'None', 'std_parameterization': '"""exp"""', 'hidden_w_init': 'nn.init.ones_', 'output_w_init': 'nn.init.ones_'}), "(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=\n init_std, 
hidden_nonlinearity=None, std_parameterization='exp',\n hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)\n", (810, 996), False, 'from garage.torch.policies import GaussianMLPPolicy\n'), ((2021, 2055), 'numpy.ones', 'np.ones', (['obs_dim'], {'dtype': 'np.float32'}), '(obs_dim, dtype=np.float32)\n', (2028, 2055), True, 'import numpy as np\n'), ((2096, 2299), 'garage.torch.policies.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'env_spec': 'env_spec', 'hidden_sizes': 'hidden_sizes', 'init_std': 'init_std', 'hidden_nonlinearity': 'None', 'std_parameterization': '"""exp"""', 'hidden_w_init': 'nn.init.ones_', 'output_w_init': 'nn.init.ones_'}), "(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=\n init_std, hidden_nonlinearity=None, std_parameterization='exp',\n hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)\n", (2113, 2299), False, 'from garage.torch.policies import GaussianMLPPolicy\n'), ((3416, 3470), 'torch.ones', 'torch.ones', (['[batch_size, obs_dim]'], {'dtype': 'torch.float32'}), '([batch_size, obs_dim], dtype=torch.float32)\n', (3426, 3470), False, 'import torch\n'), ((3511, 3714), 'garage.torch.policies.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'env_spec': 'env_spec', 'hidden_sizes': 'hidden_sizes', 'init_std': 'init_std', 'hidden_nonlinearity': 'None', 'std_parameterization': '"""exp"""', 'hidden_w_init': 'nn.init.ones_', 'output_w_init': 'nn.init.ones_'}), "(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=\n init_std, hidden_nonlinearity=None, std_parameterization='exp',\n hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)\n", (3528, 3714), False, 'from garage.torch.policies import GaussianMLPPolicy\n'), ((4935, 4983), 'numpy.ones', 'np.ones', (['(batch_size, obs_dim)'], {'dtype': 'np.float32'}), '((batch_size, obs_dim), dtype=np.float32)\n', (4942, 4983), True, 'import numpy as np\n'), ((5024, 5227), 'garage.torch.policies.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'env_spec': 'env_spec', 'hidden_sizes': 
'hidden_sizes', 'init_std': 'init_std', 'hidden_nonlinearity': 'None', 'std_parameterization': '"""exp"""', 'hidden_w_init': 'nn.init.ones_', 'output_w_init': 'nn.init.ones_'}), "(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=\n init_std, hidden_nonlinearity=None, std_parameterization='exp',\n hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)\n", (5041, 5227), False, 'from garage.torch.policies import GaussianMLPPolicy\n'), ((6403, 6457), 'torch.ones', 'torch.ones', (['[batch_size, obs_dim]'], {'dtype': 'torch.float32'}), '([batch_size, obs_dim], dtype=torch.float32)\n', (6413, 6457), False, 'import torch\n'), ((6498, 6701), 'garage.torch.policies.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'env_spec': 'env_spec', 'hidden_sizes': 'hidden_sizes', 'init_std': 'init_std', 'hidden_nonlinearity': 'None', 'std_parameterization': '"""exp"""', 'hidden_w_init': 'nn.init.ones_', 'output_w_init': 'nn.init.ones_'}), "(env_spec=env_spec, hidden_sizes=hidden_sizes, init_std=\n init_std, hidden_nonlinearity=None, std_parameterization='exp',\n hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)\n", (6515, 6701), False, 'from garage.torch.policies import GaussianMLPPolicy\n'), ((6980, 7000), 'pickle.dumps', 'pickle.dumps', (['policy'], {}), '(policy)\n', (6992, 7000), False, 'import pickle\n'), ((7026, 7041), 'pickle.loads', 'pickle.loads', (['p'], {}), '(p)\n', (7038, 7041), False, 'import pickle\n'), ((7129, 7187), 'numpy.array_equal', 'np.array_equal', (["output1_prob['mean']", "output2_prob['mean']"], {}), "(output1_prob['mean'], output2_prob['mean'])\n", (7143, 7187), True, 'import numpy as np\n'), ((580, 593), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {}), '()\n', (591, 593), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((1534, 1575), 'torch.full', 'torch.full', (['(act_dim,)', 'expected_variance'], {}), '((act_dim,), expected_variance)\n', (1544, 1575), False, 'import torch\n'), ((1889, 1902), 
'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {}), '()\n', (1900, 1902), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((2855, 2896), 'torch.full', 'torch.full', (['(act_dim,)', 'expected_variance'], {}), '((act_dim,), expected_variance)\n', (2865, 2896), False, 'import torch\n'), ((3284, 3297), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {}), '()\n', (3295, 3297), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((4333, 4385), 'torch.full', 'torch.full', (['(batch_size, act_dim)', 'expected_variance'], {}), '((batch_size, act_dim), expected_variance)\n', (4343, 4385), False, 'import torch\n'), ((4803, 4816), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {}), '()\n', (4814, 4816), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((5864, 5916), 'torch.full', 'torch.full', (['(batch_size, act_dim)', 'expected_variance'], {}), '((batch_size, act_dim), expected_variance)\n', (5874, 5916), False, 'import torch\n'), ((6320, 6333), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {}), '()\n', (6331, 6333), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((2524, 2545), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (2540, 2545), False, 'import torch\n'), ((5452, 5473), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (5468, 5473), False, 'import torch\n'), ((1302, 1328), 'torch.Tensor', 'torch.Tensor', (['hidden_sizes'], {}), '(hidden_sizes)\n', (1314, 1328), False, 'import torch\n'), ((2623, 2649), 'torch.Tensor', 'torch.Tensor', (['hidden_sizes'], {}), '(hidden_sizes)\n', (2635, 2649), False, 'import torch\n'), ((4087, 4113), 'torch.Tensor', 'torch.Tensor', (['hidden_sizes'], {}), '(hidden_sizes)\n', (4099, 4113), False, 'import torch\n'), ((5618, 5644), 'torch.Tensor', 'torch.Tensor', (['hidden_sizes'], {}), '(hidden_sizes)\n', (5630, 5644), False, 'import torch\n')] |
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
General utils
"""
import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import shutil
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from zipfile import ZipFile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from utils.downloads import gsutil_getsize
from utils.metrics import box_iou, fitness
# Settings: module-level runtime configuration applied on import
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads
FILE = Path(__file__).resolve()  # absolute path of this file
ROOT = FILE.parents[1]  # YOLOv5 root directory
def set_logging(name=None, verbose=True):
    # Configure the root logging format/level and return a named logger.
    # Only rank -1 (no DDP) or rank 0 (DDP master) log at INFO; other ranks log WARNING+.
    rank = int(os.getenv('RANK', -1))  # process rank in Multi-GPU trainings
    level = logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING
    logging.basicConfig(format="%(message)s", level=level)
    return logging.getLogger(name)
LOGGER = set_logging(__name__)  # define globally (used in train.py, val.py, detect.py, etc.)
class Profile(contextlib.ContextDecorator):
    # Usage: @Profile() decorator or 'with Profile():' context manager.
    # Prints the elapsed wall-clock time of the wrapped block on exit.
    def __enter__(self):
        self.start = time.time()  # record start timestamp
        return self  # fix: allow `with Profile() as p:` to bind the instance (was implicitly None)

    def __exit__(self, type, value, traceback):
        print(f'Profile results: {time.time() - self.start:.5f}s')
class Timeout(contextlib.ContextDecorator):
    # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager.
    # Raises TimeoutError via SIGALRM if the block exceeds `seconds` (Unix only).
    def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
        self.seconds = int(seconds)  # signal.alarm accepts whole seconds only
        self.timeout_message = timeout_msg
        self.suppress = bool(suppress_timeout_errors)

    def _timeout_handler(self, signum, frame):
        # SIGALRM handler: convert the alarm into a TimeoutError
        raise TimeoutError(self.timeout_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self._timeout_handler)  # install handler for SIGALRM
        signal.alarm(self.seconds)  # schedule SIGALRM after self.seconds

    def __exit__(self, exc_type, exc_val, exc_tb):
        signal.alarm(0)  # cancel any pending alarm
        # Swallow the TimeoutError (only) when suppression was requested
        return self.suppress and exc_type is TimeoutError
class WorkingDirectory(contextlib.ContextDecorator):
    # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager.
    # Temporarily chdir into `new_dir`, restoring the original cwd on exit.
    def __init__(self, new_dir):
        self.dir = new_dir  # directory to work in
        self.cwd = Path.cwd().resolve()  # directory to restore afterwards

    def __enter__(self):
        os.chdir(self.dir)

    def __exit__(self, exc_type, exc_val, exc_tb):
        os.chdir(self.cwd)
def try_except(func):
    # Decorator that calls func and prints (instead of raising) any exception.
    # Usage: @try_except. Returns func's result, or None when an exception was caught.
    from functools import wraps  # local import: functools is not imported at file level

    @wraps(func)  # fix: preserve func.__name__/__doc__ for introspection and logging
    def handler(*args, **kwargs):
        try:
            return func(*args, **kwargs)  # fix: forward the wrapped function's return value
        except Exception as e:
            print(e)

    return handler
def methods(instance):
    # Return the names of an object's callable attributes, excluding dunder methods
    names = []
    for attr in dir(instance):
        if attr.startswith("__"):
            continue  # skip dunder attributes
        if callable(getattr(instance, attr)):
            names.append(attr)
    return names
def print_args(name, opt):
    # Log an argparser namespace as 'name: k1=v1, k2=v2, ...'
    formatted = ', '.join(f'{k}={v}' for k, v in vars(opt).items())
    LOGGER.info(colorstr(f'{name}: ') + formatted)
def init_seeds(seed=0):
    # Seed the random, numpy and torch RNGs for reproducibility
    # https://pytorch.org/docs/stable/notes/randomness.html
    # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
    import torch.backends.cudnn as cudnn
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if seed == 0:  # maximum reproducibility
        cudnn.benchmark, cudnn.deterministic = False, True
    else:  # maximum speed
        cudnn.benchmark, cudnn.deterministic = True, False
def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    out = {}
    for k, v in da.items():
        if k not in db:
            continue  # key must exist in both dicts
        if any(pattern in k for pattern in exclude):
            continue  # key matches an excluded substring
        if v.shape == db[k].shape:  # shapes must agree (e.g. state_dict tensors)
            out[k] = v
    return out
def get_latest_run(search_dir='.'):
    # Return path to the most recent 'last*.pt' checkpoint under search_dir ('' if none found),
    # i.e. the checkpoint to --resume from
    checkpoints = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    if not checkpoints:
        return ''
    return max(checkpoints, key=os.path.getctime)  # newest by creation time
def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
    # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
    env = os.getenv(env_var)
    if env:
        path = Path(env)  # use environment variable
    else:
        cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}  # 3 OS dirs
        # unknown platforms fall back to the home directory itself (cfg.get default '')
        path = Path.home() / cfg.get(platform.system(), '')  # OS-specific config dir
    path = (path if is_writeable(path) else Path('/tmp')) / dir  # GCP and AWS lambda fix, only /tmp is writeable
    path.mkdir(exist_ok=True)  # make if required
    return path
def is_writeable(dir, test=False):
    # Return True if directory has write permissions; if test=True, verify by actually creating a file
    if not test:  # method 2: permission-bit check
        return os.access(dir, os.R_OK)  # possible issues on Windows
    probe = Path(dir) / 'tmp.txt'  # method 1: try writing a probe file
    try:
        with open(probe, 'w'):  # open file with write permissions
            pass
        probe.unlink()  # remove probe file
        return True
    except OSError:
        return False
def is_docker():
    # Is environment a Docker container? (YOLOv5 Docker images mount code at /workspace)
    workspace = Path('/workspace')
    return workspace.exists()  # or Path('/.dockerenv').exists()
def is_colab():
    # Is environment a Google Colab instance? (google.colab only importable inside Colab)
    try:
        import google.colab  # noqa: F401
    except ImportError:
        return False
    return True
def is_pip():
    # Is this file running from a pip-installed location (inside 'site-packages')?
    parts = Path(__file__).resolve().parts
    return 'site-packages' in parts
def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
    text = str(s)  # convert list, tuple, None, etc. to str
    stripped = text.encode().decode('ascii', 'ignore')  # drop any non-ASCII characters
    return len(stripped) == len(text)  # nothing was dropped -> pure ASCII
def is_chinese(s='人工智能'):
    # Return a regex match if the string contains any CJK (Chinese) character, else None
    cjk_range = '[\u4e00-\u9fff]'  # CJK Unified Ideographs block
    return re.search(cjk_range, s)
def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    # (parameter intentionally shadows the builtin `str`; name kept for interface compatibility)
    if platform.system() == 'Windows':
        return str.encode().decode('ascii', 'ignore')  # strip emoji on Windows consoles
    return str
def file_size(path):
    # Return size of a file, or total size of a directory tree, in MB (0.0 if path is neither)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / 1E6
    if path.is_dir():
        total = sum(f.stat().st_size for f in path.glob('**/*') if f.is_file())
        return total / 1E6
    return 0.0
def check_online():
    # Return True if an internet connection is available, else False
    import socket
    try:
        conn = socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
        conn.close()  # fix: close the socket — original leaked it on success
        return True
    except OSError:
        return False
@try_except
@WorkingDirectory(ROOT)
def check_git_status():
    # Recommend 'git pull' if code is out of date
    # Side effects: runs `git fetch` via shell; prints status. Skips (via assert +
    # @try_except) when not a git repo, inside Docker, or offline.
    msg = ', for updates see https://github.com/ultralytics/yolov5'
    print(colorstr('github: '), end='')
    assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
    assert not is_docker(), 'skipping check (Docker image)' + msg
    assert check_online(), 'skipping check (offline)' + msg
    cmd = 'git fetch && git config --get remote.origin.url'
    url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git')  # git fetch
    branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
    n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
    if n > 0:
        s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
    else:
        s = f'up to date with {url} ✅'
    print(emojis(s))  # emoji-safe
def check_python(minimum='3.6.2'):
    # Assert that the running interpreter meets the minimum Python version (raises AssertionError otherwise)
    current = platform.python_version()
    check_version(current, minimum, name='Python ', hard=True)
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False):
    # Compare version strings: exact match if pinned, else current >= minimum.
    # hard=True asserts instead of returning a bool.
    current, minimum = pkg.parse_version(current), pkg.parse_version(minimum)
    if pinned:
        result = current == minimum
    else:
        result = current >= minimum
    if hard:  # assert min requirements met
        assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'
    else:
        return result
@try_except
def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):
    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
    # Side effects: with install=True, missing packages are auto-installed via
    # `pip install` in a shell — assumes the requirements source is trusted.
    prefix = colorstr('red', 'bold', 'requirements:')
    check_python()  # check python version
    if isinstance(requirements, (str, Path)):  # requirements.txt file
        file = Path(requirements)
        assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
        with file.open() as f:
            requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
    else:  # list or tuple of packages
        requirements = [x for x in requirements if x not in exclude]
    n = 0  # number of packages updates
    for r in requirements:
        try:
            pkg.require(r)
        except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
            s = f"{prefix} {r} not found and is required by YOLOv5"
            if install:
                print(f"{s}, attempting auto-update...")
                try:
                    assert check_online(), f"'pip install {r}' skipped (offline)"
                    print(check_output(f"pip install '{r}'", shell=True).decode())
                    n += 1
                except Exception as e:
                    print(f'{prefix} {e}')
            else:
                print(f'{s}. Please install and rerun your command.')
    if n:  # if packages updated
        source = file.resolve() if 'file' in locals() else requirements
        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
            f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
        print(emojis(s))
def check_img_size(imgsz, s=32, floor=0):
    # Verify image size is a multiple of stride s in each dimension; round up and warn if not
    def _fit(v):
        # snap one dimension up to the stride grid, respecting the floor
        return max(make_divisible(v, int(s)), floor)

    if isinstance(imgsz, int):  # integer i.e. img_size=640
        new_size = _fit(imgsz)
    else:  # list i.e. img_size=[640, 480]
        new_size = [_fit(x) for x in imgsz]
    if new_size != imgsz:
        print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
    return new_size
def check_imshow():
    # Check if environment supports image displays (returns True/False, never raises)
    try:
        assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
        assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
        # probe the GUI backend with a 1x1 black image
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
        return False
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
    # Assert that file name(s) end with an acceptable suffix; no-op when file/suffix is falsy
    if not file or not suffix:
        return
    if isinstance(suffix, str):
        suffix = [suffix]
    files = file if isinstance(file, (list, tuple)) else [file]
    for f in files:
        s = Path(f).suffix.lower()  # file suffix
        if s:  # files without any suffix are not checked
            assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_yaml(file, suffix=('.yaml', '.yml')):
    # Search/download a YAML file (if necessary) and return its path, checking the suffix
    return check_file(file, suffix=suffix)
def check_file(file, suffix=''):
    # Search/download file (if necessary) and return path.
    # Existing path or '' -> returned as-is; http(s) URL -> downloaded to cwd;
    # otherwise searched under ROOT/{data,models,utils} (must match exactly one file).
    check_suffix(file, suffix)  # optional
    file = str(file)  # convert to str()
    if Path(file).is_file() or file == '':  # exists
        return file
    elif file.startswith(('http:/', 'https:/')):  # download
        url = str(Path(file)).replace(':/', '://')  # Pathlib turns :// -> :/
        file = Path(urllib.parse.unquote(file).split('?')[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth
        if Path(file).is_file():
            print(f'Found {url} locally at {file}')  # file already exists
        else:
            print(f'Downloading {url} to {file}...')
            torch.hub.download_url_to_file(url, file)
            assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check
        return file
    else:  # search
        files = []
        for d in 'data', 'models', 'utils':  # search directories
            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
        assert len(files), f'File not found: {file}'  # assert file was found
        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
        return files[0]  # return file
def check_dataset(data, autodownload=True):
    # Download and/or unzip dataset if not found locally, returning the parsed dataset dict
    # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
    # Download (optional)
    extract_dir = ''
    if isinstance(data, (str, Path)) and str(data).endswith('.zip'):  # i.e. gs://bucket/dir/coco128.zip
        download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
        data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))  # yaml inside the extracted dir
        extract_dir, autodownload = data.parent, False
    # Read yaml (optional)
    if isinstance(data, (str, Path)):
        with open(data, errors='ignore') as f:
            data = yaml.safe_load(f)  # dictionary
    # Parse yaml
    path = extract_dir or Path(data.get('path') or '')  # optional 'path' default to '.'
    for k in 'train', 'val', 'test':
        if data.get(k):  # prepend path
            data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]
    assert 'nc' in data, "Dataset 'nc' key missing."
    if 'names' not in data:
        data['names'] = [f'class{i}' for i in range(data['nc'])]  # assign class names if missing
    train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
    if val:
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):  # dataset missing -> try autodownload
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and autodownload:  # download script
                root = path.parent if 'path' in data else '..'  # unzip directory i.e. '../'
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    print(f'Downloading {s} to {f}...')
                    torch.hub.download_url_to_file(s, f)
                    Path(root).mkdir(parents=True, exist_ok=True)  # create root
                    ZipFile(f).extractall(path=root)  # unzip
                    Path(f).unlink()  # remove zip
                    r = None  # success
                elif s.startswith('bash '):  # bash script
                    print(f'Running {s} ...')
                    r = os.system(s)
                else:  # python script
                    # NOTE(review): executes the dataset-supplied download snippet via exec() —
                    # assumes the dataset yaml comes from a trusted source
                    r = exec(s, {'yaml': data})  # return None
                print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n")
            else:
                raise Exception('Dataset not found.')
    return data  # dictionary
def url2file(url):
    # Convert a URL to a bare filename, i.e. https://url.com/file.txt?auth -> file.txt
    url = str(Path(url)).replace(':/', '://')  # Pathlib turns :// -> :/
    name = Path(urllib.parse.unquote(url)).name  # '%2F' to '/'
    return name.split('?')[0]  # strip auth/query string
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
    # Multi-threaded file download and unzip function, used in data.yaml for autodownload
    def download_one(url, dir):
        # Download 1 file (or move it if it already exists locally), then optionally unzip
        f = dir / Path(url).name  # filename
        if Path(url).is_file():  # exists in current path
            Path(url).rename(f)  # move to dir
        elif not f.exists():
            print(f'Downloading {url} to {f}...')
            if curl:
                # NOTE(review): url/f are interpolated into a shell command — trusted-input assumption
                os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -")  # curl download, retry and resume on fail
            else:
                torch.hub.download_url_to_file(url, f, progress=True)  # torch download
        if unzip and f.suffix in ('.zip', '.gz'):
            print(f'Unzipping {f}...')
            if f.suffix == '.zip':
                ZipFile(f).extractall(path=dir)  # unzip
            elif f.suffix == '.gz':
                os.system(f'tar xfz {f} --directory {f.parent}')  # unzip
            if delete:
                f.unlink()  # remove zip
    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # make directory
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multi-threaded
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)
def make_divisible(x, divisor):
    # Return the smallest multiple of divisor that is >= x
    quotient = math.ceil(x / divisor)
    return quotient * divisor
def clean_str(s):
    # Clean a string by replacing special characters with underscore _
    specials = "[|@#!¡·$€%&()=?¿^*;:,¨´><+]"  # regex character class of characters to replace
    return re.sub(pattern=specials, repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
    # Return a function giving a sinusoidal ramp from y1 to y2 over `steps`
    # https://arxiv.org/pdf/1812.01187.pdf
    def ramp(x):
        # cos goes 1 -> -1 over [0, steps], so (1 - cos)/2 goes 0 -> 1
        return ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

    return ramp
def colorstr(*inputs):
    # Color a string with ANSI escape codes, e.g. colorstr('blue', 'hello world')
    # https://en.wikipedia.org/wiki/ANSI_escape_code
    if len(inputs) > 1:
        *args, string = inputs  # leading color/format names, trailing string
    else:
        args, string = ['blue', 'bold'], inputs[0]  # default styling
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    prefix = ''.join(colors[x] for x in args)
    return prefix + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency, normalized to sum 1) from training labels.
    # labels: list of (n,5) arrays of [class, x, y, w, h]; returns a float64 torch tensor of length nc.
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()
    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # fix: `np.int` was removed in NumPy>=1.24; builtin int is equivalent
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1 (avoid division by zero)
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produce per-image sampling weights based on class_weights and each image's label contents.
    # labels: list of (n,5) arrays of [class, x, y, w, h]; returns np.ndarray of shape (len(labels),).
    class_counts = np.array(
        [np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])  # fix: `np.int` removed in NumPy>=1.24
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights
def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # The COCO paper numbers 91 categories but only 80 appear in detection data;
    # entry i of this table is the 91-class id for 80-class id i.
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    coco91_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
        22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
        46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
        67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return coco91_ids
def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    return y
def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    y[:, 0] = x[:, 0] - half_w  # top left x
    y[:, 1] = x[:, 1] - half_h  # top left y
    y[:, 2] = x[:, 0] + half_w  # bottom right x
    y[:, 3] = x[:, 1] + half_h  # bottom right y
    return y
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from normalized [x, y, w, h] to pixel [x1, y1, x2, y2] with optional padding offset
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    y[:, 0] = w * (x[:, 0] - half_w) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - half_h) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + half_w) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + half_h) + padh  # bottom right y
    return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    # Convert nx4 boxes from pixel [x1, y1, x2, y2] to normalized [x, y, w, h]
    if clip:
        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2 / w  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2 / h  # y center
    y[:, 2] = (x[:, 2] - x[:, 0]) / w  # width
    y[:, 3] = (x[:, 3] - x[:, 1]) / h  # height
    return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segment points into pixel coordinates, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] * w + padw  # pixel x
    y[:, 1] = x[:, 1] * h + padh  # pixel y
    return y
def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, keeping only points inside the image,
    # i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    if not any(x):  # no usable points -> empty box
        return np.zeros((1, 4))
    return np.array([x.min(), y.min(), x.max(), y.max()])  # xyxy
def segments2boxes(segments):
    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
    extremes = [[s[:, 0].min(), s[:, 1].min(), s[:, 0].max(), s[:, 1].max()] for s in segments]  # xyxy per segment
    return xyxy2xywh(np.array(extremes))  # cls, xywh
def resample_segments(segments, n=1000):
    # Up-sample each (m,2) segment to (n,2) points via linear interpolation (modifies list in place)
    for idx, s in enumerate(segments):
        sample_pos = np.linspace(0, len(s) - 1, n)  # fractional sample positions
        point_idx = np.arange(len(s))  # original point indices
        cols = [np.interp(sample_pos, point_idx, s[:, j]) for j in range(2)]  # interpolate x and y columns
        segments[idx] = np.concatenate(cols).reshape(2, -1).T  # back to (n,2) segment xy
    return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) in place from img1_shape back to img0_shape, then clip to image bounds
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2  # width padding
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2  # height padding
    else:  # use precomputed (gain, pad)
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1][0], ratio_pad[1][1]
    coords[:, [0, 2]] -= pad_x  # undo x padding
    coords[:, [1, 3]] -= pad_y  # undo y padding
    coords[:, :4] /= gain  # undo scaling
    clip_coords(coords, img0_shape)
    return coords
def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes in place to image bounds; shape is (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        # columns 0/2 are x (bounded by width), 1/3 are y (bounded by height)
        for col, bound in ((0, shape[1]), (1, shape[0]), (2, shape[1]), (3, shape[0])):
            boxes[:, col].clamp_(0, bound)
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Args:
        prediction: raw model output, shape (batch, boxes, 5 + nc) with boxes as xywh,
            objectness at index 4 and per-class scores after it.
        conf_thres: confidence threshold; candidates below it are dropped.
        iou_thres: IoU threshold passed to torchvision's NMS.
        classes: optional list of class ids to keep.
        agnostic: if True, suppress across classes instead of per class.
        multi_label: allow multiple labels per box (only used when nc > 1).
        labels: optional per-image apriori labels to concatenate (autolabelling).
        max_det: maximum detections kept per image.

    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates
    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS
    t = time.time()
    # one (0, 6) placeholder per image so images with no detections stay valid
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence
        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls (one-hot from label class column)
            x = torch.cat((x, v), 0)
        # If none remain process next image
        if not x.shape[0]:
            continue
        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])
        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]
        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
        # Batched NMS: offsetting boxes by a per-class constant makes the
        # class-agnostic torchvision NMS behave per class
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy
        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded
    return output
def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    """Strip optimizer state from checkpoint 'f' to finalize training; optionally save as 's'."""
    ckpt = torch.load(f, map_location=torch.device('cpu'))
    if ckpt.get('ema'):
        ckpt['model'] = ckpt['ema']  # replace model with ema
    for key in ('optimizer', 'training_results', 'wandb_id', 'ema', 'updates'):  # training-only keys
        ckpt[key] = None
    ckpt['epoch'] = -1
    ckpt['model'].half()  # to FP16
    for p in ckpt['model'].parameters():
        p.requires_grad = False  # freeze for inference
    target = s or f
    torch.save(ckpt, target)
    mb = os.path.getsize(target) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(results, hyp, save_dir, bucket):
    """Log one hyperparameter-evolution generation.

    Appends the (results, hyp) row to evolve.csv, prints it to screen, rewrites
    hyp_evolve.yaml with the best generation's header, and optionally syncs the
    CSV/YAML with a GCS bucket via gsutil.
    """
    evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
    keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
            'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys())  # [results + hyps]
    keys = tuple(x.strip() for x in keys)
    vals = results + tuple(hyp.values())
    n = len(keys)
    # Download (optional): pull the remote evolve.csv if it has more rows than ours
    if bucket:
        url = f'gs://{bucket}/evolve.csv'
        if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
            os.system(f'gsutil cp {url} {save_dir}')  # download evolve.csv if larger than local
    # Log to evolve.csv
    s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n')  # add header
    with open(evolve_csv, 'a') as f:
        f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
    # Print to screen
    print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
    print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')
    # Save yaml
    with open(evolve_yaml, 'w') as f:
        data = pd.read_csv(evolve_csv)
        data = data.rename(columns=lambda x: x.strip())  # strip keys
        i = np.argmax(fitness(data.values[:, :7]))  # index of the best generation by fitness of the 7 result columns
        f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
                f'# Best generation: {i}\n' +
                f'# Last generation: {len(data) - 1}\n' +
                '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
                '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
        yaml.safe_dump(hyp, f, sort_keys=False)
    if bucket:
        os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}')  # upload
def apply_classifier(x, model, img, im0):
    """Apply a second-stage classifier to YOLO detections.

    For each image, crops every detected box from the original image, classifies
    224x224 crops, and keeps only detections whose YOLO class agrees with the
    classifier's prediction.
    """
    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()
            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()
            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('example%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                ims.append(im)
            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
    return x
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    """Increment a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ..."""
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        if path.is_file():
            path, suffix = path.with_suffix(''), path.suffix
        else:
            suffix = ''
        candidates = glob.glob(f"{path}{sep}*")  # similar existing paths
        pattern = rf"%s{sep}(\d+)" % path.stem
        indices = [int(m.groups()[0]) for m in (re.search(pattern, c) for c in candidates) if m]
        n = max(indices) + 1 if indices else 2  # next free number
        path = Path(f"{path}{sep}{n}{suffix}")  # incremented path
    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory
    return path
# Module-level variables
# NCOLS: terminal width used by tqdm progress bars; forced to 0 inside Docker
# (no real terminal to size against).
NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns  # terminal window size for tqdm
| [
"urllib.parse.unquote",
"platform.python_version",
"numpy.random.seed",
"pathlib.Path.home",
"yaml.safe_dump",
"pkg_resources.parse_requirements",
"pandas.read_csv",
"pkg_resources.require",
"numpy.ones",
"torch.cat",
"torch.mm",
"pathlib.Path",
"yaml.safe_load",
"glob.glob",
"torch.devi... | [((611, 677), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'linewidth': '(320)', 'precision': '(5)', 'profile': '"""long"""'}), "(linewidth=320, precision=5, profile='long')\n", (633, 677), False, 'import torch\n'), ((678, 757), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(320)', 'formatter': "{'float_kind': '{:11.5g}'.format}"}), "(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})\n", (697, 757), True, 'import numpy as np\n'), ((826, 846), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (843, 846), False, 'import cv2\n'), ((1255, 1372), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': '(logging.INFO if verbose and rank in (-1, 0) else logging.WARNING)'}), "(format='%(message)s', level=logging.INFO if verbose and\n rank in (-1, 0) else logging.WARNING)\n", (1274, 1372), False, 'import logging\n'), ((1382, 1405), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1399, 1405), False, 'import logging\n'), ((3871, 3888), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3882, 3888), False, 'import random\n'), ((3893, 3913), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3907, 3913), True, 'import numpy as np\n'), ((3918, 3941), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3935, 3941), False, 'import torch\n'), ((4420, 4474), 'glob.glob', 'glob.glob', (['f"""{search_dir}/**/last*.pt"""'], {'recursive': '(True)'}), "(f'{search_dir}/**/last*.pt', recursive=True)\n", (4429, 4474), False, 'import glob\n'), ((4737, 4755), 'os.getenv', 'os.getenv', (['env_var'], {}), '(env_var)\n', (4746, 4755), False, 'import os\n'), ((6487, 6513), 're.search', 're.search', (['"""[一-\u9fff]"""', 's'], {}), "('[一-\\u9fff]', s)\n", (6496, 6513), False, 'import re\n'), ((6761, 6771), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6765, 6771), False, 'from pathlib import Path\n'), ((17595, 
17604), 'pathlib.Path', 'Path', (['dir'], {}), '(dir)\n', (17599, 17604), False, 'from pathlib import Path\n'), ((18182, 18247), 're.sub', 're.sub', ([], {'pattern': '"""[|@#!¡·$€%&()=?¿^*;:,¨´><+]"""', 'repl': '"""_"""', 'string': 's'}), "(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s)\n", (18188, 18247), False, 'import re\n'), ((19733, 19758), 'numpy.concatenate', 'np.concatenate', (['labels', '(0)'], {}), '(labels, 0)\n', (19747, 19758), True, 'import numpy as np\n'), ((19879, 19913), 'numpy.bincount', 'np.bincount', (['classes'], {'minlength': 'nc'}), '(classes, minlength=nc)\n', (19890, 19913), True, 'import numpy as np\n'), ((20364, 20389), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (20380, 20389), False, 'import torch\n'), ((20449, 20460), 'numpy.ones', 'np.ones', (['(80)'], {}), '(80)\n', (20456, 20460), True, 'import numpy as np\n'), ((27027, 27038), 'time.time', 'time.time', ([], {}), '()\n', (27036, 27038), False, 'import time\n'), ((30388, 30409), 'torch.save', 'torch.save', (['x', '(s or f)'], {}), '(x, s or f)\n', (30398, 30409), False, 'import torch\n'), ((34096, 34106), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (34100, 34106), False, 'from pathlib import Path\n'), ((968, 982), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (980, 982), False, 'import os\n'), ((1019, 1033), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1023, 1033), False, 'from pathlib import Path\n'), ((1187, 1208), 'os.getenv', 'os.getenv', (['"""RANK"""', '(-1)'], {}), "('RANK', -1)\n", (1196, 1208), False, 'import os\n'), ((1665, 1676), 'time.time', 'time.time', ([], {}), '()\n', (1674, 1676), False, 'import time\n'), ((2270, 2322), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'self._timeout_handler'], {}), '(signal.SIGALRM, self._timeout_handler)\n', (2283, 2322), False, 'import signal\n'), ((2358, 2384), 'signal.alarm', 'signal.alarm', (['self.seconds'], {}), '(self.seconds)\n', (2370, 2384), False, 
'import signal\n'), ((2489, 2504), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (2501, 2504), False, 'import signal\n'), ((2955, 2973), 'os.chdir', 'os.chdir', (['self.dir'], {}), '(self.dir)\n', (2963, 2973), False, 'import os\n'), ((3034, 3052), 'os.chdir', 'os.chdir', (['self.cwd'], {}), '(self.cwd)\n', (3042, 3052), False, 'import os\n'), ((4783, 4792), 'pathlib.Path', 'Path', (['env'], {}), '(env)\n', (4787, 4792), False, 'from pathlib import Path\n'), ((5684, 5707), 'os.access', 'os.access', (['dir', 'os.R_OK'], {}), '(dir, os.R_OK)\n', (5693, 5707), False, 'import os\n'), ((7065, 7110), 'socket.create_connection', 'socket.create_connection', (["('1.1.1.1', 443)", '(5)'], {}), "(('1.1.1.1', 443), 5)\n", (7089, 7110), False, 'import socket\n'), ((7900, 7973), 'subprocess.check_output', 'check_output', (['f"""git rev-list {branch}..origin/master --count"""'], {'shell': '(True)'}), "(f'git rev-list {branch}..origin/master --count', shell=True)\n", (7912, 7973), False, 'from subprocess import check_output\n'), ((8326, 8351), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (8349, 8351), False, 'import platform\n'), ((8552, 8572), 'pkg_resources.parse_version', 'pkg.parse_version', (['x'], {}), '(x)\n', (8569, 8572), True, 'import pkg_resources as pkg\n'), ((9239, 9257), 'pathlib.Path', 'Path', (['requirements'], {}), '(requirements)\n', (9243, 9257), False, 'from pathlib import Path\n'), ((11459, 11473), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11470, 11473), False, 'import cv2\n'), ((11482, 11505), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (11503, 11505), False, 'import cv2\n'), ((11514, 11528), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11525, 11528), False, 'import cv2\n'), ((17701, 17720), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['threads'], {}), '(threads)\n', (17711, 17720), False, 'from multiprocessing.pool import ThreadPool\n'), ((18046, 18068), 'math.ceil', 
'math.ceil', (['(x / divisor)'], {}), '(x / divisor)\n', (18055, 18068), False, 'import math\n'), ((19704, 19718), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (19716, 19718), False, 'import torch\n'), ((21853, 21863), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (21860, 21863), True, 'import numpy as np\n'), ((22233, 22243), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (22240, 22243), True, 'import numpy as np\n'), ((22680, 22690), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (22687, 22690), True, 'import numpy as np\n'), ((23265, 23275), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (23272, 23275), True, 'import numpy as np\n'), ((23664, 23674), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (23671, 23674), True, 'import numpy as np\n'), ((24149, 24165), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (24157, 24165), True, 'import numpy as np\n'), ((24455, 24470), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (24463, 24470), True, 'import numpy as np\n'), ((29053, 29098), 'torchvision.ops.nms', 'torchvision.ops.nms', (['boxes', 'scores', 'iou_thres'], {}), '(boxes, scores, iou_thres)\n', (29072, 29098), False, 'import torchvision\n'), ((30419, 30442), 'os.path.getsize', 'os.path.getsize', (['(s or f)'], {}), '(s or f)\n', (30434, 30442), False, 'import os\n'), ((31783, 31806), 'pandas.read_csv', 'pd.read_csv', (['evolve_csv'], {}), '(evolve_csv)\n', (31794, 31806), True, 'import pandas as pd\n'), ((32274, 32313), 'yaml.safe_dump', 'yaml.safe_dump', (['hyp', 'f'], {'sort_keys': '(False)'}), '(hyp, f, sort_keys=False)\n', (32288, 32313), False, 'import yaml\n'), ((32338, 32402), 'os.system', 'os.system', (['f"""gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}"""'], {}), "(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}')\n", (32347, 32402), False, 'import os\n'), ((34269, 34295), 'glob.glob', 'glob.glob', (['f"""{path}{sep}*"""'], {}), "(f'{path}{sep}*')\n", (34278, 34295), False, 'import glob\n'), ((34526, 34557), 
'pathlib.Path', 'Path', (['f"""{path}{sep}{n}{suffix}"""'], {}), "(f'{path}{sep}{n}{suffix}')\n", (34530, 34557), False, 'from pathlib import Path\n'), ((34716, 34742), 'shutil.get_terminal_size', 'shutil.get_terminal_size', ([], {}), '()\n', (34740, 34742), False, 'import shutil\n'), ((4965, 4976), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (4974, 4976), False, 'from pathlib import Path\n'), ((5407, 5416), 'pathlib.Path', 'Path', (['dir'], {}), '(dir)\n', (5411, 5416), False, 'from pathlib import Path\n'), ((5809, 5827), 'pathlib.Path', 'Path', (['"""/workspace"""'], {}), "('/workspace')\n", (5813, 5827), False, 'from pathlib import Path\n'), ((6655, 6672), 'platform.system', 'platform.system', ([], {}), '()\n', (6670, 6672), False, 'import platform\n'), ((7431, 7443), 'pathlib.Path', 'Path', (['""".git"""'], {}), "('.git')\n", (7435, 7443), False, 'from pathlib import Path\n'), ((9688, 9702), 'pkg_resources.require', 'pkg.require', (['r'], {}), '(r)\n', (9699, 9702), True, 'import pkg_resources as pkg\n'), ((11430, 11449), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {}), '((1, 1, 3))\n', (11438, 11449), True, 'import numpy as np\n'), ((14299, 14316), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (14313, 14316), False, 'import yaml\n'), ((27053, 27098), 'torch.zeros', 'torch.zeros', (['(0, 6)'], {'device': 'prediction.device'}), '((0, 6), device=prediction.device)\n', (27064, 27098), False, 'import torch\n'), ((27679, 27699), 'torch.cat', 'torch.cat', (['(x, v)', '(0)'], {}), '((x, v), 0)\n', (27688, 27699), False, 'import torch\n'), ((30059, 30078), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (30071, 30078), False, 'import torch\n'), ((31118, 31137), 'utils.downloads.gsutil_getsize', 'gsutil_getsize', (['url'], {}), '(url)\n', (31132, 31137), False, 'from utils.downloads import gsutil_getsize\n'), ((31220, 31260), 'os.system', 'os.system', (['f"""gsutil cp {url} {save_dir}"""'], {}), "(f'gsutil cp {url} {save_dir}')\n", 
(31229, 31260), False, 'import os\n'), ((31899, 31926), 'utils.metrics.fitness', 'fitness', (['data.values[:, :7]'], {}), '(data.values[:, :7])\n', (31906, 31926), False, 'from utils.metrics import box_iou, fitness\n'), ((34332, 34374), 're.search', 're.search', (["(f'%s{sep}(\\\\d+)' % path.stem)", 'd'], {}), "(f'%s{sep}(\\\\d+)' % path.stem, d)\n", (34341, 34374), False, 'import re\n'), ((2885, 2895), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (2893, 2895), False, 'from pathlib import Path\n'), ((4987, 5004), 'platform.system', 'platform.system', ([], {}), '()\n', (5002, 5004), False, 'import platform\n'), ((5084, 5096), 'pathlib.Path', 'Path', (['"""/tmp"""'], {}), "('/tmp')\n", (5088, 5096), False, 'from pathlib import Path\n'), ((12477, 12487), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (12481, 12487), False, 'from pathlib import Path\n'), ((12989, 13030), 'torch.hub.download_url_to_file', 'torch.hub.download_url_to_file', (['url', 'file'], {}), '(url, file)\n', (13019, 13030), False, 'import torch\n'), ((16344, 16353), 'pathlib.Path', 'Path', (['url'], {}), '(url)\n', (16348, 16353), False, 'from pathlib import Path\n'), ((16778, 16787), 'pathlib.Path', 'Path', (['url'], {}), '(url)\n', (16782, 16787), False, 'from pathlib import Path\n'), ((16816, 16825), 'pathlib.Path', 'Path', (['url'], {}), '(url)\n', (16820, 16825), False, 'from pathlib import Path\n'), ((17776, 17787), 'itertools.repeat', 'repeat', (['dir'], {}), '(dir)\n', (17782, 17787), False, 'from itertools import repeat\n'), ((29358, 29382), 'utils.metrics.box_iou', 'box_iou', (['boxes[i]', 'boxes'], {}), '(boxes[i], boxes)\n', (29365, 29382), False, 'from utils.metrics import box_iou, fitness\n'), ((29696, 29707), 'time.time', 'time.time', ([], {}), '()\n', (29705, 29707), False, 'import time\n'), ((31172, 31198), 'os.path.exists', 'os.path.exists', (['evolve_csv'], {}), '(evolve_csv)\n', (31186, 31198), False, 'import os\n'), ((31141, 31168), 'os.path.getsize', 'os.path.getsize', 
(['evolve_csv'], {}), '(evolve_csv)\n', (31156, 31168), False, 'import os\n'), ((33379, 33409), 'cv2.resize', 'cv2.resize', (['cutout', '(224, 224)'], {}), '(cutout, (224, 224))\n', (33389, 33409), False, 'import cv2\n'), ((33581, 33623), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im'], {'dtype': 'np.float32'}), '(im, dtype=np.float32)\n', (33601, 33623), True, 'import numpy as np\n'), ((6116, 6130), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (6120, 6130), False, 'from pathlib import Path\n'), ((7796, 7855), 'subprocess.check_output', 'check_output', (['"""git rev-parse --abbrev-ref HEAD"""'], {'shell': '(True)'}), "('git rev-parse --abbrev-ref HEAD', shell=True)\n", (7808, 7855), False, 'from subprocess import check_output\n'), ((9435, 9460), 'pkg_resources.parse_requirements', 'pkg.parse_requirements', (['f'], {}), '(f)\n', (9457, 9460), True, 'import pkg_resources as pkg\n'), ((12813, 12823), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (12817, 12823), False, 'from pathlib import Path\n'), ((14916, 14923), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (14920, 14923), False, 'from pathlib import Path\n'), ((15501, 15537), 'torch.hub.download_url_to_file', 'torch.hub.download_url_to_file', (['s', 'f'], {}), '(s, f)\n', (15531, 15537), False, 'import torch\n'), ((16875, 16884), 'pathlib.Path', 'Path', (['url'], {}), '(url)\n', (16879, 16884), False, 'from pathlib import Path\n'), ((17026, 17079), 'os.system', 'os.system', (['f"""curl -L \'{url}\' -o \'{f}\' --retry 9 -C -"""'], {}), '(f"curl -L \'{url}\' -o \'{f}\' --retry 9 -C -")\n', (17035, 17079), False, 'import os\n'), ((17157, 17210), 'torch.hub.download_url_to_file', 'torch.hub.download_url_to_file', (['url', 'f'], {'progress': '(True)'}), '(url, f, progress=True)\n', (17187, 17210), False, 'import torch\n'), ((17462, 17510), 'os.system', 'os.system', (['f"""tar xfz {f} --directory {f.parent}"""'], {}), "(f'tar xfz {f} --directory {f.parent}')\n", (17471, 17510), False, 
'import os\n'), ((1760, 1771), 'time.time', 'time.time', ([], {}), '()\n', (1769, 1771), False, 'import time\n'), ((11988, 11995), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (11992, 11995), False, 'from pathlib import Path\n'), ((12622, 12632), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (12626, 12632), False, 'from pathlib import Path\n'), ((14056, 14075), 'pathlib.Path', 'Path', (['"""../datasets"""'], {}), "('../datasets')\n", (14060, 14075), False, 'from pathlib import Path\n'), ((15400, 15407), 'pathlib.Path', 'Path', (['s'], {}), '(s)\n', (15404, 15407), False, 'from pathlib import Path\n'), ((15901, 15913), 'os.system', 'os.system', (['s'], {}), '(s)\n', (15910, 15913), False, 'import os\n'), ((16419, 16444), 'urllib.parse.unquote', 'urllib.parse.unquote', (['url'], {}), '(url)\n', (16439, 16444), False, 'import urllib\n'), ((17369, 17379), 'zipfile.ZipFile', 'ZipFile', (['f'], {}), '(f)\n', (17376, 17379), False, 'from zipfile import ZipFile\n'), ((18412, 18441), 'math.cos', 'math.cos', (['(x * math.pi / steps)'], {}), '(x * math.pi / steps)\n', (18420, 18441), False, 'import math\n'), ((29488, 29515), 'torch.mm', 'torch.mm', (['weights', 'x[:, :4]'], {}), '(weights, x[:, :4])\n', (29496, 29515), False, 'import torch\n'), ((7697, 7737), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)', 'timeout': '(5)'}), '(cmd, shell=True, timeout=5)\n', (7709, 7737), False, 'from subprocess import check_output\n'), ((13050, 13060), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (13054, 13060), False, 'from pathlib import Path\n'), ((14078, 14088), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (14082, 14088), False, 'from pathlib import Path\n'), ((15558, 15568), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (15562, 15568), False, 'from pathlib import Path\n'), ((15639, 15649), 'zipfile.ZipFile', 'ZipFile', (['f'], {}), '(f)\n', (15646, 15649), False, 'from zipfile import ZipFile\n'), ((15701, 15708), 'pathlib.Path', 
'Path', (['f'], {}), '(f)\n', (15705, 15708), False, 'from pathlib import Path\n'), ((24709, 24734), 'numpy.interp', 'np.interp', (['x', 'xp', 's[:, i]'], {}), '(x, xp, s[:, i])\n', (24718, 24734), True, 'import numpy as np\n'), ((28461, 28499), 'torch.tensor', 'torch.tensor', (['classes'], {'device': 'x.device'}), '(classes, device=x.device)\n', (28473, 28499), False, 'import torch\n'), ((12702, 12728), 'urllib.parse.unquote', 'urllib.parse.unquote', (['file'], {}), '(file)\n', (12722, 12728), False, 'import urllib\n'), ((33756, 33773), 'torch.Tensor', 'torch.Tensor', (['ims'], {}), '(ims)\n', (33768, 33773), False, 'import torch\n'), ((10079, 10125), 'subprocess.check_output', 'check_output', (['f"""pip install \'{r}\'"""'], {'shell': '(True)'}), '(f"pip install \'{r}\'", shell=True)\n', (10091, 10125), False, 'from subprocess import check_output\n'), ((13074, 13084), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (13078, 13084), False, 'from pathlib import Path\n')] |
from math import exp
from .dataframe import DataFrame
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series
from .chart import Chart
import seaborn as sns
class CSM:
    """Simple crop-simulation model for a wheat season.

    Simulates daily canopy cover (CC) from a temperature series using
    AquaCrop-style growth/decline equations, derives fc/NDVI/ET columns in a
    monitoring table, and estimates yield from NDVI.
    """
    def __init__(self):
        self.crop_type = 'Wheat'
        self.season_length = 242  # days in the growing season
        self.climate_dataframe = self.read_climate_dataframe()
        self.monitoring = DataFrame(Series([i+1 for i in range(self.season_length)]), ['day'], data_type='list')
        # Canopy-cover curve parameters
        self.CC0 = 4.5      # initial canopy cover (%)
        self.CCx = 89.33    # maximum canopy cover (%)
        self.CGC = 0.0089   # canopy growth coefficient
        self.CDC = 0.145    # canopy decline coefficient
        self.CCx_2 = self.CCx / 2  # half-cover threshold separating growth phases
        self.Tupper = 33    # upper temperature threshold (deg C)
        self.Tbase = 5      # base temperature (deg C)
        self.CGDD_sowing = 82  # cumulative growing-degree-days required for emergence
        # hectare
        self.area = 100
        # crop characteristics #
        # 0.32 m-3/3-3 ou % theta_fc
        self.wc_field_capacity = 0.32
        # 0.17 m-3/3-3 ou % theta_fc
        # it is the water quantity below which the crop can no longer extract water, it is the separtion of
        # AW or TAW and NAW
        self.wc_wilting_point = 0.17

    def cc_equation1(self, t):
        """Exponential canopy growth (phase 1); t is cumulative GDD since emergence."""
        return self.CC0 * exp(t * self.CGC)

    def cc_equation2(self, t):
        """Saturating canopy growth towards CCx (phase 2)."""
        return self.CCx - (0.25 * exp(-t * self.CGC) * (self.CCx**2)/(self.CC0))

    def cc_equation3(self, t):
        """Canopy decline after maximum cover (phase 3)."""
        return self.CCx * (1 - (0.05 * (exp((3.33 * self.CDC) * t/(self.CCx + 2.29)) - 1)))

    def simulate_canopy_cover(self, offset=0):
        """Simulate daily canopy cover (CC, %) over the season.

        Builds cumulative growing-degree-days from the climate series, finds
        the emergence day, then applies the three canopy equations in turn.
        Records the result in self.monitoring under 'cc' and returns it.

        Changes vs. the original: the redundant Eq1-only pre-loop was removed
        (Eq1 was recomputed identically in the following loop), and hard-coded
        242 loop bounds now use self.season_length (set to 242 in __init__,
        so behavior is unchanged).
        """
        n_days = self.season_length
        Tmb = np.zeros((n_days,))  # daily degree-days above Tbase (capped)
        ti = np.zeros((n_days,))   # cumulative GDD clock (restarted at phase changes)
        CC = np.zeros((n_days,))   # canopy cover per day (%)
        Eq1 = np.zeros((n_days,))
        Eq2 = np.zeros((n_days,))
        Eq3 = np.zeros((n_days,))
        # Daily degree-days above Tbase, capped at Tupper.
        # (literal 5 presumably mirrors self.Tbase — TODO confirm)
        for day in range(offset, n_days):
            if (self.climate_dataframe[day] < 5):
                Tmb[day] = 0
            else:
                if (self.climate_dataframe[day] >= self.Tupper):
                    Tmb[day] = self.Tupper - self.Tbase
                else:
                    Tmb[day] = self.climate_dataframe[day] - self.Tbase
        # Cumulative GDD from the simulation start.
        ti[offset] = Tmb[offset]
        for k in range((offset + 1), n_days):
            ti[k] = Tmb[k] + ti[k - 1]
        # Emergence day t0: first day cumulative GDD reaches CGDD_sowing.
        t0_all = np.argwhere(ti >= self.CGDD_sowing)
        t0 = t0_all[0]
        for i in range(offset, t0[0]):
            CC[i] = 0
        CC[t0[0]] = self.CC0
        # Restart the GDD clock at emergence.
        ti[t0[0]] = 0
        for p in range((t0[0] + 1), n_days):
            ti[p] = Tmb[p] + ti[p - 1]
        # Growth-phase candidate values for every post-emergence day.
        for m in range((t0[0] + 1), n_days):
            Eq1[m] = self.cc_equation1(ti[m])
            Eq2[m] = self.cc_equation2(ti[m])
            Eq2[m] = Eq2[m].round(2)
        # Phase 1: exponential growth until half the maximum cover.
        p1 = np.argwhere(Eq1 >= self.CCx_2)
        phase1 = p1[0][0]
        for ii in range((t0[0] + 1), phase1):
            CC[ii] = Eq1[ii]
        # Phase 2: saturating growth until the maximum cover CCx.
        p2 = np.argwhere(Eq2 >= self.CCx)
        phase2 = p2[0][0]
        for jj in range(phase1, phase2):
            CC[jj] = Eq2[jj]
        # Phase 3: decline; the GDD clock restarts on the day CCx is reached.
        ti[phase2] = 0
        CC[phase2] = self.CCx
        for kk in range((phase2 + 1), n_days):
            ti[kk] = Tmb[kk] + ti[kk - 1]
            Eq3[kk] = self.cc_equation3(ti[kk])
            if (Eq3[kk] >= 0):
                CC[kk] = Eq3[kk]
            else:
                CC[kk] = 0
        # First decline day below half cover (currently unused; kept for
        # parity with the original model).
        for kk in range((phase2 + 1), n_days):
            if (Eq3[kk] < self.CCx_2):
                day_final = kk - 1
                break
        self.monitoring.add_column(CC, 'cc')
        return CC

    def simulate_fc(self):
        """Fractional cover from canopy cover (%)."""
        self.monitoring.add_transformed_columns('fc', 'cc/100')

    def simulate_ndvi(self):
        """NDVI from canopy cover (linear relation)."""
        self.monitoring.add_transformed_columns('ndvi', '(cc/118)+0.14')

    def simulate_kcb(self):
        """Basal crop coefficient Kcb from NDVI."""
        self.monitoring.add_transformed_columns('k_cb', '(1.64*ndvi)-0.2296')

    def simulate_ke(self):
        """Soil-evaporation coefficient Ke from fractional cover."""
        self.monitoring.add_transformed_columns('k_e', '[0.2 (1−fc)]')

    def simulate_et0(self, method='pm'):
        """Reference evapotranspiration.

        NOTE(review): the formula duplicates the Kcb expression — looks like a
        placeholder; confirm the intended ET0 model for `method`.
        """
        self.monitoring.add_transformed_columns('et_0', '(1.64*ndvi)-0.2296')

    def simulate_etc(self, method='double'):
        """Crop evapotranspiration via the dual-coefficient form (Kcb + Ke) * ET0."""
        self.monitoring.add_transformed_columns('et_c', '[(1.64 * NDVI)-0.2296]+[0.2 * (1 - fc)]*et_0')

    def simulate_p(self, method='pm'):
        """Depletion fraction p adjusted for ETc (FAO-56 style)."""
        self.monitoring.add_transformed_columns('p', '0.55+0.04*(5-et_c)')

    def simulate_raw(self, method='pm'):
        """Readily available water.

        NOTE(review): the formula currently duplicates simulate_p; RAW is
        conventionally p * TAW — confirm.
        """
        self.monitoring.add_transformed_columns('raw', '0.55+0.04*(5-et_c)')

    def simulate_taw(self, method='pm'):
        """Total available water over rooting depth zr (mm)."""
        self.monitoring.add_transformed_columns('taw', '1000*(0.32-0.17)*zr')

    def estimate_yield(self, method='last_10_ndvi'):
        """Estimate total yield over self.area from the simulated NDVI series.

        'max_ndvi' regresses on the seasonal NDVI peak; 'last_10_ndvi' sums the
        10 NDVI values ending at index 153 (NOTE(review): hard-coded day index —
        confirm it matches the intended end-of-season window).
        """
        ndvi_list = self.monitoring.get_column('ndvi')
        if method == 'max_ndvi':
            ndvi_max = float(max(ndvi_list))
            estimated_yield = 23.69*ndvi_max - 13.87
        elif method == 'last_10_ndvi':
            ndvi_list = list(ndvi_list)
            sum_of_last_10_ndvi = sum([float(ndvi_list[153-i]) for i in range(10)])
            estimated_yield = 1.79*sum_of_last_10_ndvi - 8.62
        return estimated_yield*self.area

    def monitor(self):
        """Print the monitoring table and plot CC and NDVI side by side."""
        self.monitoring.show()
        fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,5))
        fig.suptitle('Visual simulation')
        # CC
        sns.lineplot(ax=axes[0], x=self.monitoring.get_dataframe().index, y=self.monitoring.get_column('cc').values)
        axes[0].set_title(self.monitoring.get_column('cc').name)
        # fc
        #sns.lineplot(ax=axes[1], x=self.monitoring.get_dataframe().index, y=self.monitoring.get_column('fc').values)
        #axes[1].set_title(self.monitoring.get_column('fc').name)
        # NDVI
        sns.lineplot(ax=axes[1], x=self.monitoring.get_dataframe().index, y=self.monitoring.get_column('ndvi').values)
        axes[1].set_title(self.monitoring.get_column('ndvi').name)
        plt.show()

    def read_climate_dataframe(self):
        """Load the daily mean-temperature series that drives the simulation."""
        data = DataFrame('mean_temperature.csv')
        data.keep_columns(['t_mean'])
        return data.get_column_as_list('t_mean')
from __future__ import absolute_import, division, print_function
import sys
import base64
import uuid
# Python 2/3 compatibility shim: alias `unicode` and pick the right
# StringIO/BytesIO implementations for the running interpreter.
if sys.version_info >= (3, 0):
    unicode = str
    from io import StringIO, BytesIO
else:
    from StringIO import StringIO
    BytesIO = StringIO  # fall back to StringIO for byte buffers on Python 2
import umsgpack
import numpy as np
from . import transformations as tf
class SceneElement(object):
    """Base class for everything placed in a scene; each instance gets a unique id."""
    def __init__(self):
        # uuid1() gives a host/time-based identifier; stored as its string form.
        self.uuid = unicode(uuid.uuid1())
class ReferenceSceneElement(SceneElement):
def lower_in_object(self, object_data):
object_data.setdefault(self.field, []).append(self.lower(object_data))
return self.uuid
class Geometry(ReferenceSceneElement):
field = "geometries"
def intrinsic_transform(self):
return tf.identity_matrix()
class Material(ReferenceSceneElement):
field = "materials"
class Texture(ReferenceSceneElement):
field = "textures"
class Image(ReferenceSceneElement):
field = "images"
class Box(Geometry):
    """An axis-aligned box with extents (width, height, depth)."""

    def __init__(self, lengths):
        super(Box, self).__init__()
        self.lengths = lengths

    def lower(self, object_data):
        """Serialize as a three.js BoxGeometry."""
        dims = self.lengths
        return {
            u"uuid": self.uuid,
            u"type": u"BoxGeometry",
            u"width": dims[0],
            u"height": dims[1],
            u"depth": dims[2],
        }
class Sphere(Geometry):
    """A sphere of the given radius, tessellated at a fixed resolution."""

    def __init__(self, radius):
        super(Sphere, self).__init__()
        self.radius = radius

    def lower(self, object_data):
        """Serialize as a three.js SphereGeometry."""
        data = {
            u"uuid": self.uuid,
            u"type": u"SphereGeometry",
            u"radius": self.radius,
        }
        # Fixed tessellation: 20 segments in each direction.
        data[u"widthSegments"] = 20
        data[u"heightSegments"] = 20
        return data
class Ellipsoid(Sphere):
    """An ellipsoid, modeled as a unit sphere distorted by an affine
    (pure scaling) intrinsic transform along the three axes."""

    def __init__(self, radii):
        super(Ellipsoid, self).__init__(1.0)
        self.radii = radii

    def intrinsic_transform(self):
        # Homogeneous 4x4 scale matrix: diag(rx, ry, rz, 1).
        scale = np.hstack((self.radii, 1.0))
        return np.diag(scale)
"""
A cylinder of the given height and radius. By Three.js convention, the axis of
rotational symmetry is aligned with the y-axis.
"""
class Cylinder(Geometry):
def __init__(self, height, radius=1.0, radiusTop=None, radiusBottom=None):
super(Cylinder, self).__init__()
if radiusTop is not None and radiusBottom is not None:
self.radiusTop = radiusTop
self.radiusBottom = radiusBottom
else:
self.radiusTop = radius
self.radiusBottom = radius
self.height = height
self.radialSegments = 50
def lower(self, object_data):
return {
u"uuid": self.uuid,
u"type": u"CylinderGeometry",
u"radiusTop": self.radiusTop,
u"radiusBottom": self.radiusBottom,
u"height": self.height,
u"radialSegments": self.radialSegments
}
class GenericMaterial(Material):
    """Common base for the three.js material types; subclasses only pick
    the concrete ``_type`` string. Extra keyword arguments are passed
    through verbatim into the lowered material dictionary."""

    def __init__(self, color=0xffffff, reflectivity=0.5, map=None,
                 side=2, transparent=None, opacity=1.0,
                 linewidth=1.0,
                 wireframe=False,
                 wireframeLinewidth=1.0,
                 vertexColors=False,
                 **kwargs):
        super(GenericMaterial, self).__init__()
        self.color = color
        self.reflectivity = reflectivity
        self.map = map
        self.side = side
        self.transparent = transparent
        self.opacity = opacity
        self.linewidth = linewidth
        self.wireframe = wireframe
        self.wireframeLinewidth = wireframeLinewidth
        self.vertexColors = vertexColors
        self.properties = kwargs

    def lower(self, object_data):
        # Three.js allows a material to have opacity != 1 while still being
        # non-transparent, in which case the opacity only desaturates the
        # color. That combination is rarely wanted, so unless the caller set
        # ``transparent`` explicitly we derive it from the opacity.
        if self.transparent is None:
            is_transparent = bool(self.opacity != 1)
        else:
            is_transparent = self.transparent
        data = {
            u"uuid": self.uuid,
            u"type": self._type,
            u"color": self.color,
            u"reflectivity": self.reflectivity,
            u"side": self.side,
            u"transparent": is_transparent,
            u"opacity": self.opacity,
            u"linewidth": self.linewidth,
            u"wireframe": bool(self.wireframe),
            u"wireframeLinewidth": self.wireframeLinewidth,
            # three.js wants an enum here: 2 == vertex colors, 0 == none.
            u"vertexColors": 2 if self.vertexColors else 0,
        }
        data.update(self.properties)
        if self.map is not None:
            data[u"map"] = self.map.lower_in_object(object_data)
        return data
# Concrete material types: each subclass only selects the three.js material
# class name that GenericMaterial.lower() emits as u"type".
class MeshBasicMaterial(GenericMaterial):
    _type = u"MeshBasicMaterial"


class MeshPhongMaterial(GenericMaterial):
    _type = u"MeshPhongMaterial"


class MeshLambertMaterial(GenericMaterial):
    _type = u"MeshLambertMaterial"


class MeshToonMaterial(GenericMaterial):
    _type = u"MeshToonMaterial"


class LineBasicMaterial(GenericMaterial):
    _type = u"LineBasicMaterial"
class PngImage(Image):
    """Raw PNG bytes shipped to the viewer as a base64 data URI."""

    def __init__(self, data):
        super(PngImage, self).__init__()
        self.data = data

    @staticmethod
    def from_file(fname):
        """Load PNG bytes from a file on disk."""
        with open(fname, "rb") as f:
            return PngImage(f.read())

    def lower(self, object_data):
        encoded = base64.b64encode(self.data).decode('ascii')
        return {
            u"uuid": self.uuid,
            u"url": unicode("data:image/png;base64," + encoded),
        }
class GenericTexture(Texture):
    """A texture described by an arbitrary dictionary of three.js
    properties. An ``image`` entry, if present, must be an Image element
    and is lowered into a UUID reference."""
    def __init__(self, properties):
        super(GenericTexture, self).__init__()
        self.properties = properties

    def lower(self, object_data):
        # Note: a u"uuid" key in self.properties would override ours here.
        data = {u"uuid": self.uuid}
        data.update(self.properties)
        if u"image" in data:
            image = data[u"image"]
            # Replace the Image element with a reference to its UUID.
            data[u"image"] = image.lower_in_object(object_data)
        return data
class ImageTexture(Texture):
    """A texture backed by an Image element.

    Parameters
    ----------
    image : Image
        The image element to sample from.
    wrap : sequence of 2 ints, optional
        Wrap modes for (u, v). 1001 is presumably three.js's
        ClampToEdgeWrapping constant -- confirm against the viewer.
    repeat : sequence of 2 numbers, optional
        Repeat counts along (u, v).
    **kwargs
        Extra properties copied verbatim into the lowered texture dict.
    """

    def __init__(self, image, wrap=(1001, 1001), repeat=(1, 1), **kwargs):
        super(ImageTexture, self).__init__()
        self.image = image
        # Bug fix: the defaults used to be mutable lists shared across every
        # instance. Use tuple defaults and copy to fresh lists so the lowered
        # representation stays identical but instances never share state.
        self.wrap = list(wrap)
        self.repeat = list(repeat)
        self.properties = kwargs

    def lower(self, object_data):
        data = {
            u"uuid": self.uuid,
            u"wrap": self.wrap,
            u"repeat": self.repeat,
            u"image": self.image.lower_in_object(object_data)
        }
        data.update(self.properties)
        return data
class Object(SceneElement):
    """A drawable scene node pairing one geometry with one material.

    Subclasses (Mesh, Points, Line, ...) choose the three.js object type
    via the ``_type`` class attribute.
    """

    def __init__(self, geometry, material=None):
        """
        Parameters
        ----------
        geometry : Geometry
        material : Material, optional
            Defaults to a fresh ``MeshPhongMaterial()``. Bug fix: the old
            default ``material=MeshPhongMaterial()`` was evaluated once at
            class-definition time, so every Object constructed without an
            explicit material shared a single material instance (and UUID).
        """
        super(Object, self).__init__()
        self.geometry = geometry
        self.material = material if material is not None else MeshPhongMaterial()

    def lower(self):
        """Serialize as a three.js JSON Object (version 4.5 schema)."""
        data = {
            u"metadata": {
                u"version": 4.5,
                u"type": u"Object",
            },
            u"geometries": [],
            u"materials": [],
            u"object": {
                u"uuid": self.uuid,
                u"type": self._type,
                u"geometry": self.geometry.uuid,
                u"material": self.material.uuid,
                # Flattened 4x4 intrinsic transform of the geometry.
                u"matrix": list(self.geometry.intrinsic_transform().flatten())
            }
        }
        # Populate the geometries/materials lists by reference.
        self.geometry.lower_in_object(data)
        self.material.lower_in_object(data)
        return data
class Mesh(Object):
    # Triangle-mesh scene node (three.js "Mesh").
    _type = u"Mesh"
class OrthographicCamera(SceneElement):
    """An orthographic camera mirroring three.js's ``OrthographicCamera``
    frustum parameters (left/right/top/bottom/near/far planes + zoom)."""

    def __init__(self, left, right, top, bottom, near, far, zoom=1):
        super(OrthographicCamera, self).__init__()
        self.left = left
        self.right = right
        self.top = top
        self.bottom = bottom
        self.near = near
        self.far = far
        self.zoom = zoom

    def lower(self):
        frustum = {
            u"uuid": self.uuid,
            u"type": u"OrthographicCamera",
            u"left": self.left,
            u"right": self.right,
            u"top": self.top,
            u"bottom": self.bottom,
            u"near": self.near,
            u"far": self.far,
            u"zoom": self.zoom,
        }
        return {u"object": frustum}
def item_size(array):
    """Return the three.js ``itemSize`` of a numpy array: 1 for a flat
    array, or the number of rows for a 2-D array (one column per item).

    Raises
    ------
    ValueError
        If the array has three or more dimensions.
    """
    ndim = array.ndim
    if ndim == 1:
        return 1
    if ndim == 2:
        return array.shape[0]
    raise ValueError("I can only pack 1- or 2-dimensional numpy arrays, but this one has {:d} dimensions".format(ndim))
def threejs_type(dtype):
    """Map a numpy dtype (or scalar type) to a pair
    (three.js typed-array name, msgpack extension code).

    Raises
    ------
    ValueError
        For any dtype outside the four supported ones.
    """
    supported = (
        (np.uint8, (u"Uint8Array", 0x12)),
        (np.int32, (u"Int32Array", 0x15)),
        (np.uint32, (u"Uint32Array", 0x16)),
        (np.float32, (u"Float32Array", 0x17)),
    )
    # np.dtype == comparison accepts both dtype objects and scalar types.
    for candidate, result in supported:
        if dtype == candidate:
            return result
    raise ValueError("Unsupported datatype: " + str(dtype))
def pack_numpy_array(x):
    """Lower a numpy array to a three.js BufferAttribute dictionary.

    float64 input is downcast to float32 (there is no Float64Array path
    here); the raw bytes are shipped as a msgpack extension value in
    Fortran (column-major) order.
    """
    if x.dtype == np.float64:
        x = x.astype(np.float32)
    typename, extcode = threejs_type(x.dtype)
    packed = umsgpack.Ext(extcode, x.tobytes('F'))
    return {
        u"itemSize": item_size(x),
        u"type": typename,
        u"array": packed,
        u"normalized": False
    }
def data_from_stream(stream):
    """Read the full contents of a StringIO/BytesIO stream as text.

    On Python 3, BytesIO content is decoded as UTF-8 and any other stream
    type raises ValueError; on Python 2 the stream is read as-is.
    """
    if sys.version_info < (3, 0):
        return stream.read()
    if isinstance(stream, BytesIO):
        return stream.read().decode(encoding='utf-8')
    if isinstance(stream, StringIO):
        return stream.read()
    raise ValueError('Stream must be instance of StringIO or BytesIO, not {}'.format(type(stream)))
class MeshGeometry(Geometry):
    """A geometry whose data is the raw contents of a mesh file
    (obj/dae/stl), shipped via the viewer's custom ``_meshfile`` type."""
    def __init__(self, contents, mesh_format):
        super(MeshGeometry, self).__init__()
        self.contents = contents        # file contents (text or packed bytes)
        self.mesh_format = mesh_format  # format tag: u"obj", u"dae", u"stl"

    def lower(self, object_data):
        return {
            u"type": u"_meshfile",
            u"uuid": self.uuid,
            u"format": self.mesh_format,
            u"data": self.contents
        }
class ObjMeshGeometry(MeshGeometry):
    """MeshGeometry specialized to Wavefront OBJ contents."""

    def __init__(self, contents):
        # Bug fix: the original called
        #   super(ObjMeshGeometry, self, contents, u"obj").__init__()
        # which raises TypeError (super() takes at most two arguments) and
        # never initialized the base class.
        super(ObjMeshGeometry, self).__init__(contents, u"obj")

    @staticmethod
    def from_file(fname):
        """Load OBJ contents from a file on disk."""
        with open(fname, "r") as f:
            return MeshGeometry(f.read(), u"obj")

    @staticmethod
    def from_stream(f):
        """Load OBJ contents from a StringIO/BytesIO stream."""
        return MeshGeometry(data_from_stream(f), u"obj")
class DaeMeshGeometry(MeshGeometry):
    """MeshGeometry specialized to COLLADA (.dae) contents."""

    def __init__(self, contents):
        # Bug fix: the original called
        #   super(DaeMeshGeometry, self, contents, u"dae").__init__()
        # which raises TypeError (super() takes at most two arguments) and
        # never initialized the base class.
        super(DaeMeshGeometry, self).__init__(contents, u"dae")

    @staticmethod
    def from_file(fname):
        """Load .dae contents from a file on disk."""
        with open(fname, "r") as f:
            return MeshGeometry(f.read(), u"dae")

    @staticmethod
    def from_stream(f):
        """Load .dae contents from a StringIO/BytesIO stream."""
        return MeshGeometry(data_from_stream(f), u"dae")
class StlMeshGeometry(MeshGeometry):
    """MeshGeometry specialized to binary STL contents."""

    def __init__(self, contents):
        # Bug fix: the original called
        #   super(StlMeshGeometry, self, contents, u"stl").__init__()
        # which raises TypeError (super() takes at most two arguments) and
        # never initialized the base class.
        super(StlMeshGeometry, self).__init__(contents, u"stl")

    @staticmethod
    def from_file(fname):
        """Load STL bytes from disk, wrapped as a msgpack extension value."""
        with open(fname, "rb") as f:
            arr = np.frombuffer(f.read(), dtype=np.uint8)
            _, extcode = threejs_type(np.uint8)
            encoded = umsgpack.Ext(extcode, arr.tobytes())
            return MeshGeometry(encoded, u"stl")

    @staticmethod
    def from_stream(f):
        """Load STL bytes from a BytesIO/StringIO (or any Py2 stream)."""
        if sys.version_info >= (3, 0):
            if isinstance(f, BytesIO):
                arr = np.frombuffer(f.read(), dtype=np.uint8)
            elif isinstance(f, StringIO):
                # Text stream: encode back to bytes before packing.
                arr = np.frombuffer(bytes(f.read(), "utf-8"), dtype=np.uint8)
            else:
                raise ValueError('Stream must be instance of StringIO or BytesIO, not {}'.format(type(f)))
        else:
            arr = np.frombuffer(f.read(), dtype=np.uint8)
        _, extcode = threejs_type(np.uint8)
        encoded = umsgpack.Ext(extcode, arr.tobytes())
        return MeshGeometry(encoded, u"stl")
class TriangularMeshGeometry(Geometry):
    """A mesh consisting of an arbitrary collection of triangular faces.

    Construct with an Nx3 array of vertices and an Mx3 array of faces,
    each face being three indices into ``vertices``. For example, a
    square made of two adjacent triangles:

        vertices = np.array([
            [0, 0, 0],  # the first vertex is at [0, 0, 0]
            [1, 0, 0],
            [1, 0, 1],
            [0, 0, 1]
        ])
        faces = np.array([
            [0, 1, 2],  # the first face uses vertices 0, 1, and 2
            [3, 0, 2]
        ])
        mesh = TriangularMeshGeometry(vertices, faces)
    """
    __slots__ = ["vertices", "faces"]

    def __init__(self, vertices, faces):
        super(TriangularMeshGeometry, self).__init__()
        vertices = np.asarray(vertices, dtype=np.float32)
        faces = np.asarray(faces, dtype=np.uint32)
        assert vertices.shape[1] == 3, "`vertices` must be an Nx3 array"
        assert faces.shape[1] == 3, "`faces` must be an Mx3 array"
        self.vertices = vertices
        self.faces = faces

    def lower(self, object_data):
        """Serialize as a three.js BufferGeometry (transposed buffers)."""
        attributes = {u"position": pack_numpy_array(self.vertices.T)}
        buffers = {
            u"attributes": attributes,
            u"index": pack_numpy_array(self.faces.T)
        }
        return {
            u"uuid": self.uuid,
            u"type": u"BufferGeometry",
            u"data": buffers
        }
class PointsGeometry(Geometry):
    """A point cloud: a position buffer plus optional per-point colors."""

    def __init__(self, position, color=None):
        super(PointsGeometry, self).__init__()
        self.position = position
        self.color = color

    def lower(self, object_data):
        """Serialize as a three.js BufferGeometry."""
        attributes = {}
        attributes[u"position"] = pack_numpy_array(self.position)
        if self.color is not None:
            attributes[u"color"] = pack_numpy_array(self.color)
        return {
            u"uuid": self.uuid,
            u"type": u"BufferGeometry",
            u"data": {u"attributes": attributes}
        }
class PointsMaterial(Material):
    """Material for point clouds. ``size`` is the rendered point size and
    vertex colors are always enabled (three.js enum value 2)."""

    def __init__(self, size=0.001, color=0xffffff):
        super(PointsMaterial, self).__init__()
        self.size = size
        self.color = color

    def lower(self, object_data):
        lowered = {u"uuid": self.uuid, u"type": u"PointsMaterial"}
        lowered[u"color"] = self.color
        lowered[u"size"] = self.size
        lowered[u"vertexColors"] = 2
        return lowered
class Points(Object):
    # Point-cloud scene node (three.js "Points").
    _type = u"Points"


def PointCloud(position, color, **kwargs):
    """Convenience constructor: build a Points object from a position
    buffer and per-point colors; extra kwargs (e.g. ``size``) are passed
    to PointsMaterial."""
    return Points(
        PointsGeometry(position, color),
        PointsMaterial(**kwargs)
    )
# Line-drawing scene nodes; the three.js type decides connectivity.
class Line(Object):
    # Connected polyline strip.
    _type = u"Line"


class LineSegments(Object):
    # Independent segments: every consecutive vertex pair is one segment.
    _type = u"LineSegments"


class LineLoop(Object):
    # Polyline closed back to its first vertex.
    _type = u"LineLoop"
def triad(scale=1.0):
    """A visual representation of the origin of a coordinate system, drawn
    as three lines in red, green, and blue along the x, y, and z axes. The
    `scale` parameter controls the length of the three lines.

    Returns an `Object` which can be passed to `set_object()`
    """
    # Three segments, each from the origin to scale along one axis.
    endpoints = np.array([
        [0, 0, 0], [scale, 0, 0],
        [0, 0, 0], [0, scale, 0],
        [0, 0, 0], [0, 0, scale]]).astype(np.float32)
    # Per-vertex colors: each axis fades toward a lighter shade.
    shades = np.array([
        [1, 0, 0], [1, 0.6, 0],
        [0, 1, 0], [0.6, 1, 0],
        [0, 0, 1], [0, 0.6, 1]]).astype(np.float32)
    geometry = PointsGeometry(position=endpoints.T, color=shades.T)
    return LineSegments(geometry, LineBasicMaterial(vertexColors=True))
| [
"numpy.asarray",
"numpy.hstack",
"uuid.uuid1",
"numpy.array",
"base64.b64encode"
] | [((12821, 12859), 'numpy.asarray', 'np.asarray', (['vertices'], {'dtype': 'np.float32'}), '(vertices, dtype=np.float32)\n', (12831, 12859), True, 'import numpy as np\n'), ((12876, 12910), 'numpy.asarray', 'np.asarray', (['faces'], {'dtype': 'np.uint32'}), '(faces, dtype=np.uint32)\n', (12886, 12910), True, 'import numpy as np\n'), ((409, 421), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (419, 421), False, 'import uuid\n'), ((2022, 2050), 'numpy.hstack', 'np.hstack', (['(self.radii, 1.0)'], {}), '((self.radii, 1.0))\n', (2031, 2050), True, 'import numpy as np\n'), ((5711, 5738), 'base64.b64encode', 'base64.b64encode', (['self.data'], {}), '(self.data)\n', (5727, 5738), False, 'import base64\n'), ((15135, 15227), 'numpy.array', 'np.array', (['[[0, 0, 0], [scale, 0, 0], [0, 0, 0], [0, scale, 0], [0, 0, 0], [0, 0, scale]]'], {}), '([[0, 0, 0], [scale, 0, 0], [0, 0, 0], [0, scale, 0], [0, 0, 0], [0,\n 0, scale]])\n', (15143, 15227), True, 'import numpy as np\n'), ((15301, 15388), 'numpy.array', 'np.array', (['[[1, 0, 0], [1, 0.6, 0], [0, 1, 0], [0.6, 1, 0], [0, 0, 1], [0, 0.6, 1]]'], {}), '([[1, 0, 0], [1, 0.6, 0], [0, 1, 0], [0.6, 1, 0], [0, 0, 1], [0, \n 0.6, 1]])\n', (15309, 15388), True, 'import numpy as np\n')] |
"""
Script plots relationship between vertical warming in modeled data sets
Notes
-----
Author : <NAME>
Date : 21 February 2020
"""
### Import modules
import datetime
import numpy as np
import matplotlib.pyplot as plt
import cmocean
import calc_Utilities as UT
import scipy.stats as sts
import read_CTLNQ as CONT
import read_ExpMonthly as NUDG
import read_ShortCoupled as COUP
import read_SIT as THICK
import read_SIC as CONC
### Define directories
directoryfigure = '/home/zlabe/Desktop/AA/Vertical_Model/'

### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting Vertical Warming- %s----' % titletime)

### Add parameters
# NOTE(review): ``datareader`` and ``latpolar`` are not referenced in the
# visible portion of this script -- presumably leftovers from a sibling
# script; confirm before deleting.
datareader = False
latpolar = 65.
cps = 'none'          # experiment-variant flag ('none' or 'yes'); see readData
variable = 'TEMP'     # variable to plot
period = 'DJF'        # seasonal average to compute (see readData)
level = 'profile'     # request full vertical profiles from the readers
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m"]  # panel labels
if cps == 'none':
    # LaTeX-formatted panel titles; order must match runnamesdata below.
    runnames = [r'$\Delta$AA-2030',r'$\Delta$AA-2060',r'$\Delta$AA-2090',
                r'$\Delta$WACCM-SIC-Pd',r'$\Delta$S-Coupled-Pd',r'$\Delta$WACCM-SIT-Pd']
#elif cps == 'yes':
#    runnames = [r'$\Delta$AA-2030',r'$\Delta$AA-2060',r'$\Delta$AA-2090-cps',
#               r'$\Delta$S-Coupled-Pd',r'$\Delta$SIT-Pd',r'$\Delta$SIC-Pd']
runnamesdata = ['AA-2030','AA-2060','AA-2090','SIC','coupled','SIT']
### Function to read in data
def readData(simu,period,varia,level,cps):
    """Read one experiment/control pair and return seasonal-mean anomalies.

    Parameters
    ----------
    simu : str
        Simulation name: 'AA-2030', 'AA-2060', 'AA-2090', 'coupled',
        'SIT', or 'SIC'.
    period : str
        Months to average over: 'DJF' (cross-year helper) or one of the
        fixed month windows ('OND','D','JFM','JF','FMA','FM','J','F',
        'M','MA').
    varia : str
        Variable name passed through to the readers (e.g. 'TEMP').
    level : str
        Vertical level request (e.g. 'profile').
    cps : str
        Experiment-variant flag; only used for the 'AA-2090' pair.

    Returns
    -------
    lat, lon, lev : coordinate arrays from the reader
    anommean : ensemble-mean zonal-mean anomaly (future minus historical)
    nens : int, ensemble size of the control
    pruns : FDR-corrected t-test result from UT.calc_FDR_ttest
    climo : ensemble-mean zonal-mean control climatology

    Raises
    ------
    ValueError
        If `simu` or `period` is not recognized.
    """
    ### Select the experiment/control reader pair
    if simu == 'AA-2030':
        lat,lon,lev,future = NUDG.readExperi(varia,'AA','2030',level,'none')
        lat,lon,lev,historical = CONT.readControl(varia,level,'none')
    elif simu == 'AA-2060':
        lat,lon,lev,future = NUDG.readExperi(varia,'AA','2060',level,'none')
        lat,lon,lev,historical = CONT.readControl(varia,level,'none')
    elif simu == 'AA-2090':
        lat,lon,lev,future = NUDG.readExperi(varia,'AA','2090',level,cps)
        lat,lon,lev,historical = CONT.readControl(varia,level,cps)
    elif simu == 'coupled':
        lat,lon,lev,future = COUP.readCOUPs(varia,'C_Fu',level)
        lat,lon,lev,historical = COUP.readCOUPs(varia,'C_Pd',level)
    elif simu == 'SIT':
        lat,lon,lev,future = THICK.readSIT(varia,'SIT_Fu',level)
        lat,lon,lev,historical = THICK.readSIT(varia,'SIT_Pd',level)
    elif simu == 'SIC':
        lat,lon,lev,future = CONC.readSIC(varia,'Fu',level)
        lat,lon,lev,historical = CONC.readSIC(varia,'Pd',level)
    else:
        # Bug fix: an unknown simu used to fall through to a NameError.
        raise ValueError('Selected wrong simulation: %s' % simu)

    ### Calculate number of ensembles
    nens = np.shape(historical)[0]

    ### Mask missing data [ensembles,months,...]; the readers use a large
    ### negative sentinel for missing values.
    future[np.where(future <= -1e10)] = np.nan
    historical[np.where(historical <= -1e10)] = np.nan

    ### Average over the requested months (axis=1 is the month axis).
    # Each named period maps to a slice into the month axis; this replaces
    # the original ten copy-pasted elif branches.
    month_slices = {'OND': slice(-3,None), 'D': slice(-1,None),
                    'JFM': slice(0,3), 'JF': slice(0,2),
                    'FMA': slice(1,4), 'FM': slice(1,3),
                    'J': slice(0,1), 'F': slice(1,2),
                    'M': slice(2,3), 'MA': slice(2,4)}
    if period == 'DJF':
        print('Calculating over %s months!' % period)
        # December-January-February spans calendar years, so use the
        # dedicated helper; note the output loses one year (shape[0]-1).
        runs = [future,historical]
        var_mo = np.empty((2,historical.shape[0]-1,historical.shape[2],
                           historical.shape[3],historical.shape[4]))
        for i in range(len(runs)):
            var_mo[i,:,:,:,:] = UT.calcDecJanFeb(runs[i],runs[i],lat,lon,level,17)
        futurem = var_mo[0]
        historicalm = var_mo[1]
    elif period in month_slices:
        print('Calculating over %s months!' % period)
        months = month_slices[period]
        futurem = np.nanmean(future[:,months],axis=1)
        historicalm = np.nanmean(historical[:,months],axis=1)
    else:
        # Bug fix: the original only *printed* the ValueError and then hit
        # an UnboundLocalError on futurem below; raise it instead.
        raise ValueError('Selected wrong month period!')

    ### Calculate zonal means
    futuremz = np.nanmean(futurem,axis=3)
    historicalmz = np.nanmean(historicalm,axis=3)

    ### Calculate anomalies [ens,level,lat] and their ensemble mean
    anom = futuremz - historicalmz
    anommean = np.nanmean(anom,axis=0)

    ### Calculate significance (False Discovery Rate corrected t-test)
    pruns = UT.calc_FDR_ttest(futuremz,historicalmz,0.05) #FDR

    ### Select climo
    climo = np.nanmean(historicalmz,axis=0)

    return lat,lon,lev,anommean,nens,pruns,climo
### Call data
#lat,lon,lev,anomAA30,nensAA30,prunsAA30,climoAA30 = readData('AA-2030',period,variable,level,cps)
#lat,lon,lev,anomAA60,nensAA60,prunsAA60,climoAA60 = readData('AA-2060',period,variable,level,cps)
#lat,lon,lev,anomAA90,nensAA90,prunsAA90,climoAA90 = readData('AA-2090',period,variable,level,cps)
#lat,lon,lev,anomcoup,nensCOUP,prunsCOUP,climoCOUP = readData('SIC',period,variable,level,cps)
#lat,lon,lev,anomthic,nensTHIC,prunsTHIC,climoTHIC = readData('coupled',period,variable,level,cps)
#lat,lon,lev,anomconc,nensCONC,prunsCONC,climoCONC = readData('SIT',period,variable,level,cps)
#
#### Chunk data
#dataall = [anomAA30,anomAA60,anomAA90,anomcoup,anomthic,anomconc]
#nensall = [nensAA30,nensAA60,nensAA90,nensCOUP,nensTHIC,nensCONC]
#pall = [prunsAA30,prunsAA60,prunsAA90,prunsCOUP,prunsTHIC,prunsCONC]
#climoall =[climoAA30,climoAA60,climoAA90,climoCOUP,climoTHIC,climoCONC]
###########################################################################
###########################################################################
###########################################################################
##### Plot profiles
### Use LaTeX text rendering with a sans-serif font for all figure text
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
def adjust_spines(ax, spines):
    """Show only the axis spines named in ``spines`` on ``ax``.

    Kept spines are nudged 2 points outward; all others are hidden, and
    the tick positions follow the surviving spines.
    """
    for name, spine in ax.spines.items():
        if name not in spines:
            spine.set_color('none')             # hide this spine
        else:
            spine.set_position(('outward', 2))  # nudge kept spine outward
    if 'left' not in spines:
        ax.yaxis.set_ticks([])
    else:
        ax.yaxis.set_ticks_position('left')
    if 'bottom' not in spines:
        ax.xaxis.set_ticks([])
    else:
        ax.xaxis.set_ticks_position('bottom')
### Set limits for contours and colorbars
# NOTE(review): everything below depends on `dataall`, `pall`, `climoall`,
# `nensall`, `lat`, and `lev`, which are only assigned in the commented-out
# readData calls above. As committed, running this script raises NameError
# until that section is uncommented.
if variable == 'TEMP':
    limit = np.arange(-3,3.01,0.25)   # filled-contour levels
    barlim = np.arange(-3,4,1)         # colorbar tick locations
    cmap = cmocean.cm.balance          # diverging colormap
    label = r'\textbf{$^{\circ}$C}'    # colorbar label
zscale = np.array([1000,925,850,700,500,300,200])  # labeled pressure levels [hPa]
latq,levq = np.meshgrid(lat,lev)
fig = plt.figure()
### One panel per experiment (2 rows x 3 columns)
for i in range(len(runnames)):
    varnomask = dataall[i]
    pvar = pall[i]
    clim = climoall[i]
    en = nensall[i]
    ### Mask significant
    # Zero-out NaNs in the significance field, multiply, then turn the
    # resulting zeros back into NaN so only significant anomalies plot.
    pvar[np.isnan(pvar)] = 0.
    var = varnomask * pvar
    var[var == 0.] = np.nan
    ### Create plot
    ax1 = plt.subplot(2,3,i+1)
    ax1.spines['top'].set_color('dimgrey')
    ax1.spines['right'].set_color('dimgrey')
    ax1.spines['bottom'].set_color('dimgrey')
    ax1.spines['left'].set_color('dimgrey')
    ax1.spines['left'].set_linewidth(2)
    ax1.spines['bottom'].set_linewidth(2)
    ax1.spines['right'].set_linewidth(2)
    ax1.spines['top'].set_linewidth(2)
    # Only the left column (panels 0 and 3) shows y labels and only the
    # bottom row (panels 3-5) shows x labels.
    if i == 0:
        ax1.tick_params(axis='y',direction='out',which='major',pad=3,
                        width=2,color='dimgrey')
        plt.gca().axes.get_yaxis().set_visible(True)
        plt.gca().axes.get_xaxis().set_visible(False)
        plt.ylabel(r'\textbf{Pressure [hPa]}',color='k',fontsize=7)
    elif i == 3:
        ax1.tick_params(axis='x',direction='out',which='major',pad=3,
                        width=2,color='dimgrey')
        ax1.tick_params(axis='y',direction='out',which='major',pad=3,
                        width=2,color='dimgrey')
        plt.gca().axes.get_xaxis().set_visible(True)
        plt.gca().axes.get_yaxis().set_visible(True)
        plt.ylabel(r'\textbf{Pressure [hPa]}',color='k',fontsize=7)
    elif i == 4 or i == 5:
        ax1.tick_params(axis='x',direction='out',which='major',pad=3,
                        width=2,color='dimgrey')
        plt.gca().axes.get_xaxis().set_visible(True)
        plt.gca().axes.get_yaxis().set_visible(False)
    else:
        ax1.tick_params(axis='y',direction='out',which='major',pad=3,
                        width=0,color='w')
        plt.gca().axes.get_yaxis().set_visible(False)
        plt.gca().axes.get_xaxis().set_visible(False)
    if i == 3 or i == 5:
        plt.xlabel(r'\textbf{Latitude [$\bf{^{\circ}}$N]}',color='k',fontsize=7)
    ax1.xaxis.set_ticks_position('bottom')
    ax1.yaxis.set_ticks_position('left')
    ### Add levels
    # Dashed horizontal guide lines at selected pressure levels.
    plt.axhline(lev[1],linewidth=0.6,linestyle='--',dashes=(1,0.5),
                color='k')
    plt.axhline(lev[2],linewidth=0.6,linestyle='--',dashes=(1,0.5),
                color='k')
    plt.axhline(lev[3],linewidth=0.6,linestyle='--',dashes=(1,0.5),
                color='k')
    plt.axhline(lev[5],linewidth=0.6,linestyle='--',dashes=(1,0.5),
                color='k')
    plt.axhline(lev[7],linewidth=0.6,linestyle='--',dashes=(1,0.5),
                color='k')
    # For the three AA panels, draw a blue box over 70-90N, 1000-600 hPa.
    if i < 3:
        plt.vlines(70,ymin=1000,ymax=600,linestyle='-',color='blue',
                   linewidth=1.5)
        plt.vlines(90,ymin=1000,ymax=600,linestyle='-',color='blue',
                   linewidth=1.5,zorder=10)
        plt.hlines(600,xmin=70,xmax=90,linestyle='-',color='blue',
                   linewidth=1.5)
        plt.hlines(1000,xmin=70,xmax=90,linestyle='-',color='blue',
                   linewidth=1.5,zorder=11)
    ### Plot contours
    cs = plt.contourf(latq,levq,var,limit,extend='both')
#    cs1 = plt.contour(latq,levq,clim,np.arange(-90,95,5),
#                      linewidths=0.3,colors='k',extend='both')
#    cs2 = plt.contourf(latq,levq,pvar,colors='None',
#                       hatches=['//////'],linewidths=0.4)
    cs.set_cmap(cmap)
    # Pressure decreases upward: invert and use a log scale.
    plt.gca().invert_yaxis()
    plt.yscale('log',nonposy='clip')
    plt.xticks(np.arange(-90,91,15),map(str,np.arange(-90,91,15)),fontsize=6)
    plt.yticks(zscale,map(str,zscale),ha='right',fontsize=6)
    plt.xlim([45,90])
    if variable == 'TEMP' or variable == 'GEOP':
        plt.ylim([1000,200])
    else:
        plt.ylim([1000,10])
    plt.minorticks_off()
    # Panel title (top right) and letter label (top left).
    ax1.annotate(r'\textbf{%s}' % runnames[i],xy=(80,200),xytext=(0.98,0.93),
                 textcoords='axes fraction',color='k',fontsize=8,
                 rotation=0,ha='right',va='center')
    ax1.annotate(r'\textbf{[%s]}' % letters[i],xy=(80,200),xytext=(0.02,0.93),
                 textcoords='axes fraction',color='k',fontsize=8,
                 rotation=0,ha='left',va='center')
#    ax1.annotate(r'\textbf{[%s]}' % en,xy=(80,200),xytext=(0.02,0.07),
#                 textcoords='axes fraction',color='dimgrey',fontsize=8,
#                 rotation=0,ha='left',va='center')
###########################################################################
plt.tight_layout()
### Shared horizontal colorbar below the panels
cbar_ax = fig.add_axes([0.33,0.08,0.4,0.03])
cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',
                    extend='max',extendfrac=0.07,drawedges=False)
cbar.set_label(label,fontsize=11,color='dimgrey',labelpad=1.4)
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.ax.tick_params(axis='x', size=.001,labelsize=6)
cbar.outline.set_edgecolor('dimgrey')
plt.subplots_adjust(bottom=0.17,hspace=0.08,wspace=0.08)
if cps == 'none':
    plt.savefig(directoryfigure + 'VerticalModels_TEMP_%s.png' % period,
                dpi=300)
elif cps == 'yes':
    plt.savefig(directoryfigure + 'VerticalModels_TEMP_%s_CPS.png' % period,
                dpi=300)
print('Completed: Script done!')
"matplotlib.pyplot.yscale",
"numpy.empty",
"numpy.isnan",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.gca",
"calc_Utilities.calcDecJanFeb",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.hlines",
"numpy.nanmean",
"numpy.me... | [((555, 578), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (576, 578), False, 'import datetime\n'), ((8053, 8080), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (8059, 8080), True, 'import matplotlib.pyplot as plt\n'), ((8080, 8153), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (8086, 8153), True, 'import matplotlib.pyplot as plt\n'), ((8888, 8900), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8898, 8900), True, 'import matplotlib.pyplot as plt\n'), ((13236, 13254), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13252, 13254), True, 'import matplotlib.pyplot as plt\n'), ((13676, 13734), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.17)', 'hspace': '(0.08)', 'wspace': '(0.08)'}), '(bottom=0.17, hspace=0.08, wspace=0.08)\n', (13695, 13734), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6476), 'numpy.nanmean', 'np.nanmean', (['futurem'], {'axis': '(3)'}), '(futurem, axis=3)\n', (6459, 6476), True, 'import numpy as np\n'), ((6495, 6526), 'numpy.nanmean', 'np.nanmean', (['historicalm'], {'axis': '(3)'}), '(historicalm, axis=3)\n', (6505, 6526), True, 'import numpy as np\n'), ((6658, 6682), 'numpy.nanmean', 'np.nanmean', (['anom'], {'axis': '(0)'}), '(anom, axis=0)\n', (6668, 6682), True, 'import numpy as np\n'), ((6730, 6777), 'calc_Utilities.calc_FDR_ttest', 'UT.calc_FDR_ttest', (['futuremz', 'historicalmz', '(0.05)'], {}), '(futuremz, historicalmz, 0.05)\n', (6747, 6777), True, 'import calc_Utilities as UT\n'), ((6819, 6851), 'numpy.nanmean', 'np.nanmean', (['historicalmz'], {'axis': '(0)'}), '(historicalmz, axis=0)\n', (6829, 6851), True, 'import numpy as np\n'), ((8661, 8686), 'numpy.arange', 'np.arange', (['(-3)', '(3.01)', '(0.25)'], {}), '(-3, 3.01, 0.25)\n', (8670, 8686), True, 'import numpy as np\n'), 
((8698, 8717), 'numpy.arange', 'np.arange', (['(-3)', '(4)', '(1)'], {}), '(-3, 4, 1)\n', (8707, 8717), True, 'import numpy as np\n'), ((8795, 8841), 'numpy.array', 'np.array', (['[1000, 925, 850, 700, 500, 300, 200]'], {}), '([1000, 925, 850, 700, 500, 300, 200])\n', (8803, 8841), True, 'import numpy as np\n'), ((8852, 8873), 'numpy.meshgrid', 'np.meshgrid', (['lat', 'lev'], {}), '(lat, lev)\n', (8863, 8873), True, 'import numpy as np\n'), ((9176, 9200), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(i + 1)'], {}), '(2, 3, i + 1)\n', (9187, 9200), True, 'import matplotlib.pyplot as plt\n'), ((10965, 11043), 'matplotlib.pyplot.axhline', 'plt.axhline', (['lev[1]'], {'linewidth': '(0.6)', 'linestyle': '"""--"""', 'dashes': '(1, 0.5)', 'color': '"""k"""'}), "(lev[1], linewidth=0.6, linestyle='--', dashes=(1, 0.5), color='k')\n", (10976, 11043), True, 'import matplotlib.pyplot as plt\n'), ((11060, 11138), 'matplotlib.pyplot.axhline', 'plt.axhline', (['lev[2]'], {'linewidth': '(0.6)', 'linestyle': '"""--"""', 'dashes': '(1, 0.5)', 'color': '"""k"""'}), "(lev[2], linewidth=0.6, linestyle='--', dashes=(1, 0.5), color='k')\n", (11071, 11138), True, 'import matplotlib.pyplot as plt\n'), ((11155, 11233), 'matplotlib.pyplot.axhline', 'plt.axhline', (['lev[3]'], {'linewidth': '(0.6)', 'linestyle': '"""--"""', 'dashes': '(1, 0.5)', 'color': '"""k"""'}), "(lev[3], linewidth=0.6, linestyle='--', dashes=(1, 0.5), color='k')\n", (11166, 11233), True, 'import matplotlib.pyplot as plt\n'), ((11250, 11328), 'matplotlib.pyplot.axhline', 'plt.axhline', (['lev[5]'], {'linewidth': '(0.6)', 'linestyle': '"""--"""', 'dashes': '(1, 0.5)', 'color': '"""k"""'}), "(lev[5], linewidth=0.6, linestyle='--', dashes=(1, 0.5), color='k')\n", (11261, 11328), True, 'import matplotlib.pyplot as plt\n'), ((11345, 11423), 'matplotlib.pyplot.axhline', 'plt.axhline', (['lev[7]'], {'linewidth': '(0.6)', 'linestyle': '"""--"""', 'dashes': '(1, 0.5)', 'color': '"""k"""'}), "(lev[7], 
linewidth=0.6, linestyle='--', dashes=(1, 0.5), color='k')\n", (11356, 11423), True, 'import matplotlib.pyplot as plt\n'), ((11920, 11971), 'matplotlib.pyplot.contourf', 'plt.contourf', (['latq', 'levq', 'var', 'limit'], {'extend': '"""both"""'}), "(latq, levq, var, limit, extend='both')\n", (11932, 11971), True, 'import matplotlib.pyplot as plt\n'), ((12261, 12294), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'nonposy': '"""clip"""'}), "('log', nonposy='clip')\n", (12271, 12294), True, 'import matplotlib.pyplot as plt\n'), ((12447, 12465), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[45, 90]'], {}), '([45, 90])\n', (12455, 12465), True, 'import matplotlib.pyplot as plt\n'), ((12585, 12605), 'matplotlib.pyplot.minorticks_off', 'plt.minorticks_off', ([], {}), '()\n', (12603, 12605), True, 'import matplotlib.pyplot as plt\n'), ((13757, 13834), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + 'VerticalModels_TEMP_%s.png' % period)"], {'dpi': '(300)'}), "(directoryfigure + 'VerticalModels_TEMP_%s.png' % period, dpi=300)\n", (13768, 13834), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1875), 'read_ExpMonthly.readExperi', 'NUDG.readExperi', (['varia', '"""AA"""', '"""2030"""', 'level', '"""none"""'], {}), "(varia, 'AA', '2030', level, 'none')\n", (1839, 1875), True, 'import read_ExpMonthly as NUDG\n'), ((1905, 1943), 'read_CTLNQ.readControl', 'CONT.readControl', (['varia', 'level', '"""none"""'], {}), "(varia, level, 'none')\n", (1921, 1943), True, 'import read_CTLNQ as CONT\n'), ((3326, 3346), 'numpy.shape', 'np.shape', (['historical'], {}), '(historical)\n', (3334, 3346), True, 'import numpy as np\n'), ((3420, 3454), 'numpy.where', 'np.where', (['(future <= -10000000000.0)'], {}), '(future <= -10000000000.0)\n', (3428, 3454), True, 'import numpy as np\n'), ((3471, 3509), 'numpy.where', 'np.where', (['(historical <= -10000000000.0)'], {}), '(historical <= -10000000000.0)\n', (3479, 3509), True, 'import numpy as np\n'), ((3897, 3931), 
'numpy.nanmean', 'np.nanmean', (['future[:, -3:]'], {'axis': '(1)'}), '(future[:, -3:], axis=1)\n', (3907, 3931), True, 'import numpy as np\n'), ((3952, 3990), 'numpy.nanmean', 'np.nanmean', (['historical[:, -3:]'], {'axis': '(1)'}), '(historical[:, -3:], axis=1)\n', (3962, 3990), True, 'import numpy as np\n'), ((9065, 9079), 'numpy.isnan', 'np.isnan', (['pvar'], {}), '(pvar)\n', (9073, 9079), True, 'import numpy as np\n'), ((9782, 9843), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\textbf{Pressure [hPa]}"""'], {'color': '"""k"""', 'fontsize': '(7)'}), "('\\\\textbf{Pressure [hPa]}', color='k', fontsize=7)\n", (9792, 9843), True, 'import matplotlib.pyplot as plt\n'), ((10779, 10855), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{Latitude [$\\\\bf{^{\\\\circ}}$N]}"""'], {'color': '"""k"""', 'fontsize': '(7)'}), "('\\\\textbf{Latitude [$\\\\bf{^{\\\\circ}}$N]}', color='k', fontsize=7)\n", (10789, 10855), True, 'import matplotlib.pyplot as plt\n'), ((11463, 11542), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(70)'], {'ymin': '(1000)', 'ymax': '(600)', 'linestyle': '"""-"""', 'color': '"""blue"""', 'linewidth': '(1.5)'}), "(70, ymin=1000, ymax=600, linestyle='-', color='blue', linewidth=1.5)\n", (11473, 11542), True, 'import matplotlib.pyplot as plt\n'), ((11566, 11661), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(90)'], {'ymin': '(1000)', 'ymax': '(600)', 'linestyle': '"""-"""', 'color': '"""blue"""', 'linewidth': '(1.5)', 'zorder': '(10)'}), "(90, ymin=1000, ymax=600, linestyle='-', color='blue', linewidth=\n 1.5, zorder=10)\n", (11576, 11661), True, 'import matplotlib.pyplot as plt\n'), ((11679, 11756), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(600)'], {'xmin': '(70)', 'xmax': '(90)', 'linestyle': '"""-"""', 'color': '"""blue"""', 'linewidth': '(1.5)'}), "(600, xmin=70, xmax=90, linestyle='-', color='blue', linewidth=1.5)\n", (11689, 11756), True, 'import matplotlib.pyplot as plt\n'), ((11780, 11874), 'matplotlib.pyplot.hlines', 
'plt.hlines', (['(1000)'], {'xmin': '(70)', 'xmax': '(90)', 'linestyle': '"""-"""', 'color': '"""blue"""', 'linewidth': '(1.5)', 'zorder': '(11)'}), "(1000, xmin=70, xmax=90, linestyle='-', color='blue', linewidth=\n 1.5, zorder=11)\n", (11790, 11874), True, 'import matplotlib.pyplot as plt\n'), ((12314, 12336), 'numpy.arange', 'np.arange', (['(-90)', '(91)', '(15)'], {}), '(-90, 91, 15)\n', (12323, 12336), True, 'import numpy as np\n'), ((12522, 12543), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1000, 200]'], {}), '([1000, 200])\n', (12530, 12543), True, 'import matplotlib.pyplot as plt\n'), ((12561, 12581), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1000, 10]'], {}), '([1000, 10])\n', (12569, 12581), True, 'import matplotlib.pyplot as plt\n'), ((13870, 13955), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + 'VerticalModels_TEMP_%s_CPS.png' % period)"], {'dpi': '(300)'}), "(directoryfigure + 'VerticalModels_TEMP_%s_CPS.png' % period,\n dpi=300)\n", (13881, 13955), True, 'import matplotlib.pyplot as plt\n'), ((1999, 2050), 'read_ExpMonthly.readExperi', 'NUDG.readExperi', (['varia', '"""AA"""', '"""2060"""', 'level', '"""none"""'], {}), "(varia, 'AA', '2060', level, 'none')\n", (2014, 2050), True, 'import read_ExpMonthly as NUDG\n'), ((2080, 2118), 'read_CTLNQ.readControl', 'CONT.readControl', (['varia', 'level', '"""none"""'], {}), "(varia, level, 'none')\n", (2096, 2118), True, 'import read_CTLNQ as CONT\n'), ((4085, 4119), 'numpy.nanmean', 'np.nanmean', (['future[:, -1:]'], {'axis': '(1)'}), '(future[:, -1:], axis=1)\n', (4095, 4119), True, 'import numpy as np\n'), ((4140, 4178), 'numpy.nanmean', 'np.nanmean', (['historical[:, -1:]'], {'axis': '(1)'}), '(historical[:, -1:], axis=1)\n', (4150, 4178), True, 'import numpy as np\n'), ((10206, 10267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\textbf{Pressure [hPa]}"""'], {'color': '"""k"""', 'fontsize': '(7)'}), "('\\\\textbf{Pressure [hPa]}', color='k', fontsize=7)\n", (10216, 10267), True, 
'import matplotlib.pyplot as plt\n'), ((12232, 12241), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12239, 12241), True, 'import matplotlib.pyplot as plt\n'), ((12343, 12365), 'numpy.arange', 'np.arange', (['(-90)', '(91)', '(15)'], {}), '(-90, 91, 15)\n', (12352, 12365), True, 'import numpy as np\n'), ((2174, 2222), 'read_ExpMonthly.readExperi', 'NUDG.readExperi', (['varia', '"""AA"""', '"""2090"""', 'level', 'cps'], {}), "(varia, 'AA', '2090', level, cps)\n", (2189, 2222), True, 'import read_ExpMonthly as NUDG\n'), ((2252, 2287), 'read_CTLNQ.readControl', 'CONT.readControl', (['varia', 'level', 'cps'], {}), '(varia, level, cps)\n', (2268, 2287), True, 'import read_CTLNQ as CONT\n'), ((4309, 4415), 'numpy.empty', 'np.empty', (['(2, historical.shape[0] - 1, historical.shape[2], historical.shape[3],\n historical.shape[4])'], {}), '((2, historical.shape[0] - 1, historical.shape[2], historical.shape\n [3], historical.shape[4]))\n', (4317, 4415), True, 'import numpy as np\n'), ((2428, 2464), 'read_ShortCoupled.readCOUPs', 'COUP.readCOUPs', (['varia', '"""C_Fu"""', 'level'], {}), "(varia, 'C_Fu', level)\n", (2442, 2464), True, 'import read_ShortCoupled as COUP\n'), ((2496, 2532), 'read_ShortCoupled.readCOUPs', 'COUP.readCOUPs', (['varia', '"""C_Pd"""', 'level'], {}), "(varia, 'C_Pd', level)\n", (2510, 2532), True, 'import read_ShortCoupled as COUP\n'), ((4472, 4527), 'calc_Utilities.calcDecJanFeb', 'UT.calcDecJanFeb', (['runs[i]', 'runs[i]', 'lat', 'lon', 'level', '(17)'], {}), '(runs[i], runs[i], lat, lon, level, 17)\n', (4488, 4527), True, 'import calc_Utilities as UT\n'), ((4682, 4716), 'numpy.nanmean', 'np.nanmean', (['future[:, 0:3]'], {'axis': '(1)'}), '(future[:, 0:3], axis=1)\n', (4692, 4716), True, 'import numpy as np\n'), ((4737, 4775), 'numpy.nanmean', 'np.nanmean', (['historical[:, 0:3]'], {'axis': '(1)'}), '(historical[:, 0:3], axis=1)\n', (4747, 4775), True, 'import numpy as np\n'), ((2684, 2721), 'read_SIT.readSIT', 'THICK.readSIT', (['varia', 
'"""SIT_Fu"""', 'level'], {}), "(varia, 'SIT_Fu', level)\n", (2697, 2721), True, 'import read_SIT as THICK\n'), ((2753, 2790), 'read_SIT.readSIT', 'THICK.readSIT', (['varia', '"""SIT_Pd"""', 'level'], {}), "(varia, 'SIT_Pd', level)\n", (2766, 2790), True, 'import read_SIT as THICK\n'), ((4871, 4905), 'numpy.nanmean', 'np.nanmean', (['future[:, 0:2]'], {'axis': '(1)'}), '(future[:, 0:2], axis=1)\n', (4881, 4905), True, 'import numpy as np\n'), ((4926, 4964), 'numpy.nanmean', 'np.nanmean', (['historical[:, 0:2]'], {'axis': '(1)'}), '(historical[:, 0:2], axis=1)\n', (4936, 4964), True, 'import numpy as np\n'), ((2927, 2959), 'read_SIC.readSIC', 'CONC.readSIC', (['varia', '"""Fu"""', 'level'], {}), "(varia, 'Fu', level)\n", (2939, 2959), True, 'import read_SIC as CONC\n'), ((2991, 3023), 'read_SIC.readSIC', 'CONC.readSIC', (['varia', '"""Pd"""', 'level'], {}), "(varia, 'Pd', level)\n", (3003, 3023), True, 'import read_SIC as CONC\n'), ((5061, 5095), 'numpy.nanmean', 'np.nanmean', (['future[:, 1:4]'], {'axis': '(1)'}), '(future[:, 1:4], axis=1)\n', (5071, 5095), True, 'import numpy as np\n'), ((5116, 5154), 'numpy.nanmean', 'np.nanmean', (['historical[:, 1:4]'], {'axis': '(1)'}), '(historical[:, 1:4], axis=1)\n', (5126, 5154), True, 'import numpy as np\n'), ((9675, 9684), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9682, 9684), True, 'import matplotlib.pyplot as plt\n'), ((9728, 9737), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9735, 9737), True, 'import matplotlib.pyplot as plt\n'), ((5250, 5284), 'numpy.nanmean', 'np.nanmean', (['future[:, 1:3]'], {'axis': '(1)'}), '(future[:, 1:3], axis=1)\n', (5260, 5284), True, 'import numpy as np\n'), ((5305, 5343), 'numpy.nanmean', 'np.nanmean', (['historical[:, 1:3]'], {'axis': '(1)'}), '(historical[:, 1:3], axis=1)\n', (5315, 5343), True, 'import numpy as np\n'), ((10100, 10109), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10107, 10109), True, 'import matplotlib.pyplot as plt\n'), ((10153, 
10162), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10160, 10162), True, 'import matplotlib.pyplot as plt\n'), ((5438, 5472), 'numpy.nanmean', 'np.nanmean', (['future[:, 0:1]'], {'axis': '(1)'}), '(future[:, 0:1], axis=1)\n', (5448, 5472), True, 'import numpy as np\n'), ((5493, 5531), 'numpy.nanmean', 'np.nanmean', (['historical[:, 0:1]'], {'axis': '(1)'}), '(historical[:, 0:1], axis=1)\n', (5503, 5531), True, 'import numpy as np\n'), ((10419, 10428), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10426, 10428), True, 'import matplotlib.pyplot as plt\n'), ((10472, 10481), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10479, 10481), True, 'import matplotlib.pyplot as plt\n'), ((10637, 10646), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10644, 10646), True, 'import matplotlib.pyplot as plt\n'), ((10691, 10700), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10698, 10700), True, 'import matplotlib.pyplot as plt\n'), ((5626, 5660), 'numpy.nanmean', 'np.nanmean', (['future[:, 1:2]'], {'axis': '(1)'}), '(future[:, 1:2], axis=1)\n', (5636, 5660), True, 'import numpy as np\n'), ((5681, 5719), 'numpy.nanmean', 'np.nanmean', (['historical[:, 1:2]'], {'axis': '(1)'}), '(historical[:, 1:2], axis=1)\n', (5691, 5719), True, 'import numpy as np\n'), ((5814, 5848), 'numpy.nanmean', 'np.nanmean', (['future[:, 2:3]'], {'axis': '(1)'}), '(future[:, 2:3], axis=1)\n', (5824, 5848), True, 'import numpy as np\n'), ((5869, 5907), 'numpy.nanmean', 'np.nanmean', (['historical[:, 2:3]'], {'axis': '(1)'}), '(historical[:, 2:3], axis=1)\n', (5879, 5907), True, 'import numpy as np\n'), ((6003, 6037), 'numpy.nanmean', 'np.nanmean', (['future[:, 2:4]'], {'axis': '(1)'}), '(future[:, 2:4], axis=1)\n', (6013, 6037), True, 'import numpy as np\n'), ((6058, 6096), 'numpy.nanmean', 'np.nanmean', (['historical[:, 2:4]'], {'axis': '(1)'}), '(historical[:, 2:4], axis=1)\n', (6068, 6096), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input function hook for PPO TF estimator.
For the PPO algorithm, see https://arxiv.org/abs/1707.06347.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from polish.ppo import ppo_loss
from polish.utils import distributions
from polish.utils import host_call_fn
from polish.utils import tf_layers
from tensorflow.contrib import tpu as contrib_tpu
# Module-level side effect: emit INFO-and-above log messages for this process.
logging.set_verbosity(logging.INFO)
@gin.configurable
class PpoModelFn(object):
  """Main class for model function used in tf.estimator.

  Attributes:
    policy_loss: Proximal Policy Optimization (PPO) policy loss.
    value_loss: PPO value loss.
    entropy_loss: PPO entropy loss.
    imitation_kl_divergence: The KL-divergence of action distributions between
      the policy and MCTS.
    total_loss: PPO total loss.
    clipfrac: Fraction of examples in a batch clipped by PPO.
    approxkl: `Approximate` KL divergence between new policy and old policy.
      This is an estimate (approximate) of the KL divergence, since we compute
      the KL divergence using the samples drawn from the new and
      old distributions.
    total_params: Total trainable parameters.
    train_op: Training operation.
    mean_new: Mean of new policy distribution.
    logstd_new: Log standard deviation of new policy distribution.
    mean_old: Mean of old policy distribution.
    logstd_old: Log standard deviation of old policy distribution.
    value_new: state-value from the latest trained state-value network.
    kl_divergence: Kullback-Leibler divergence between new and old policy.
    entropy: Entropy of the new policy.
    global_step: Global step of training.
    policy_ratio: the ratio between new policy and old policy.
    last_iteration_mcts_enable: Track the sampling type (PPO sampling/MCTS) in
      the process of training. That is, whether in the last iteration of
      training, we use PPO sampling (False) or MCTS sampling (True).
    mcts_sampling_enable: If True, it means that the current batch
      is generated by MCTS.
    mean_mse_loss: Mean squared error between the mean value of
      policy distribuiton and the mean value returned by MCTS given a state.
    logstd_mse_loss: Mean squared error between log of standard deviation value
      of the policy distribuiton and the log of standard deviation value
      returned by MCTS given a state.
  """

  def __init__(
      self,
      env_action_space=2,
      iterations_per_loop=320,
      num_timesteps=1000000,
      max_horizon=2048,
      learning_rate=3e-4,
      use_tpu=False,
      ppo2_enable=True,
      policy_coeff=1.0,
      value_coeff=0.5,
      entropy_coeff=0.0,
      tpu_num_shards=8,
      mse_loss_coeff=0.0,
      warmstart_file=None,
      policy_hidden_layer_size=64,
      value_hidden_layer_size=64):
    """Creates a model function for PPO algorithm.

    The default values for all the parameters are from PPO paper.

    Args:
      env_action_space: The size of environment action space.
      iterations_per_loop: Number of steps to run on TPU before outfeeding
        metrics to the CPU. If the number of iterations in the loop would exceed
        the number of train steps, the loop will exit before reaching
        --iterations_per_loop. The larger this value is, the higher the
        utilization on the TPU.
      num_timesteps: Total number of timesteps. Defines the total number of
        samples taken from the environment during the whole process of training.
      max_horizon: Maximum number of samples taken from the environment before
        starting training.
      learning_rate: Initial learning rate value. Note that the actual learning
        rate is linearly decayed.
      use_tpu: If True, training occurs on TPU.
      ppo2_enable: If True, we use the next version of PPO algorithm, known as
        PPO2. In this version, not only does the probability ratio get clipped,
        but also the clipping is performed on the value loss.
        For more information:
        https://github.com/openai/baselines/tree/master/baselines/ppo2.
      policy_coeff: Policy loss coefficient in the total loss calculation.
      value_coeff: Value loss coefficient in the total loss calculation.
      entropy_coeff: Entropy loss coefficient in the total loss calculation.
      tpu_num_shards: Number of TPU shards.
      mse_loss_coeff: The coefficient for Mean Squared Error (MSE) loss.
      warmstart_file: If not None, we restore the weights for the parameters in
        `newpolicy` scope from this file. `newpolicy` scope contains both
        policy and value network.
      policy_hidden_layer_size: The size of hidden layer in policy network.
        Currently, this value is used for both of the hidden layers.
      value_hidden_layer_size: The size of hidden layer in value network.
        Currently, this value is used for both of the hidden layers.
    """
    # Loss/metric attributes are populated later by compute_total_loss() and
    # update_learning_rate(); they are initialized here with placeholders.
    self.policy_loss = 0
    self.value_loss = 0
    self.entropy_loss = 0
    self.total_loss = 0
    self.clipfrac = 0
    self.approxkl = 0
    self.policy_ratio = 0
    self.total_params = None
    self.train_op = None
    self.mean_new = None
    self.logstd_new = None
    self.mean_old = None
    self.logstd_old = None
    self.value_new = None
    self.kl_divergence = None
    self.entropy = 0
    self.global_step = None
    self._decayed_learning_rate = None
    self._env_action_space = env_action_space
    self._iterations_per_loop = iterations_per_loop
    self._num_timesteps = num_timesteps
    self._max_horizon = max_horizon
    self._learning_rate = learning_rate
    self._use_tpu = use_tpu
    self._ppo2_enable = ppo2_enable
    self._policy_coeff = policy_coeff
    self._value_coeff = value_coeff
    self._entropy_coeff = entropy_coeff
    self._tpu_num_shards = tpu_num_shards
    self._mse_loss_coeff = mse_loss_coeff
    self._warmstart_file = warmstart_file
    # Tracks the sampling mode (PPO vs. MCTS) seen on the previous call to
    # update_learning_rate(); used to detect a mode switch.
    self.last_iteration_mcts_enable = False
    self._mcts_global_step = 0
    self._policy_hidden_layer_size = policy_hidden_layer_size
    self._value_hidden_layer_size = value_hidden_layer_size

  def __call__(self, features, labels, mode, params):
    """Makes the instance usable directly as a tf.estimator model_fn."""
    return self.model_fn(features, labels, mode, params)

  def model_inference_fn_ppo(self, features, prefix):
    """Builds just the inference part of the model graph.

    Builds both the policy head (two tanh hidden layers -> mean, plus a
    state-independent log-std variable) and the value head (two tanh hidden
    layers -> scalar value), all under the `<prefix>policy` variable scope.

    Args:
      features: input features tensor.
      prefix: prefix to be added to the network.

    Returns:
      (value, logstd, mean) tuple of tensors.
    """
    # Policy Network
    features = tf.layers.flatten(features)
    # With prefix='new' this scope is 'newpolicy', matching the scope used by
    # tf.trainable_variables(scope='newpolicy') and the warmstart mapping.
    with tf.variable_scope(prefix + 'policy', reuse=tf.AUTO_REUSE):
      policy_1 = tf.tanh(
          tf_layers.fc(
              tensor_in=features,
              num_hidden=self._policy_hidden_layer_size,
              scope_name='/policy_1',
              init_scale=np.sqrt(2)))
      policy_2 = tf.tanh(
          tf_layers.fc(
              tensor_in=policy_1,
              num_hidden=self._policy_hidden_layer_size,
              scope_name='/policy_2',
              init_scale=np.sqrt(2)))
      mean = tf_layers.fc(
          tensor_in=policy_2,
          num_hidden=self._env_action_space,
          scope_name='/mean',
          init_scale=0.01,
          init_bias=0.0)
      # Log standard deviation is a free variable, not a function of state.
      logstd_var = tf.get_variable(
          name=prefix + '_logstd',
          shape=[1, self._env_action_space],
          initializer=tf.zeros_initializer())
      # Evaluate logstd_var and broadcast to have a same shape as mean
      logstd = tf.multiply(logstd_var, 1.0)
      value_1 = tf.tanh(
          tf_layers.fc(
              tensor_in=features,
              num_hidden=self._value_hidden_layer_size,
              scope_name='/value_1',
              init_scale=np.sqrt(2)))
      value_2 = tf.tanh(
          tf_layers.fc(
              tensor_in=value_1,
              num_hidden=self._value_hidden_layer_size,
              scope_name='/value_2',
              init_scale=np.sqrt(2)))
      # [:, 0] squeezes the trailing unit dimension -> shape [batch].
      value = tf_layers.fc(
          tensor_in=value_2, num_hidden=1, scope_name='/value')[:, 0]
    return value, logstd, mean

  def learning_rate_update_true_fn(self):
    """The function which is performed if the predicate is true.

    The predicate that calls this function is defined in 'update_learning_rate'.

    Returns:
      The current global step.
    """
    return tf.train.get_global_step()

  def learning_rate_update_false_fn(self):
    """The function which is performed if the predicate is false.

    The predicate that calls this function is defined in 'update_learning_rate'.

    Returns:
      A type-casted value of `_mcts_global_step` to int64.
      `_mcts_global_step` is the global step at which MCTS algorithm starts.
      The type casting is necessary as the type of returned tensor in `true_fn`
      is an int.64.
    """
    return tf.cast(self._mcts_global_step, tf.int64)

  def update_learning_rate(self):
    """Update the learning rate with a decaying factor.

    The decay is linear in the number of completed iterations-per-loop
    updates. When MCTS sampling is active, the step count is offset by
    `_mcts_global_step` so the decay restarts from the point where MCTS
    sampling began.
    """
    # Offset the step count by the MCTS start step only when the current
    # batch was generated by MCTS.
    self._current_global_step = tf.cond(
        tf.equal(self.mcts_sampling_enable,
                 True), lambda: self._mcts_global_step, lambda: 0)
    self._current_global_step = tf.cast(self._current_global_step, tf.int64)
    update = (tf.train.get_global_step() -
              self._current_global_step) // self._iterations_per_loop + 1
    current_frac = self._num_timesteps // self._max_horizon
    update = tf.cast(update, tf.float32)
    current_frac = tf.cast(current_frac, tf.float32)
    frac = 1.0 - (update - 1.0) / current_frac
    self._decayed_learning_rate = self._learning_rate * frac
    # Record the global step at which the sampling mode (PPO vs. MCTS)
    # switched; unchanged mode keeps the previously recorded step.
    self._mcts_global_step = tf.cond(
        tf.not_equal(self.mcts_sampling_enable,
                     self.last_iteration_mcts_enable),
        self.learning_rate_update_true_fn, self.learning_rate_update_false_fn)
    self.last_iteration_mcts_enable = self.mcts_sampling_enable

  def build_training_op(self, loss):
    """Get training operation.

    Args:
      loss: a loss function for training.

    Define the optimization operation and perform gradient calculation for both
    TPU/Non-TPU training. Gradients are clipped to a global norm of 0.5 in
    both paths.

    Returns:
      Computed gradient.
    """
    adam_optimizer = tf.train.AdamOptimizer(
        learning_rate=self._decayed_learning_rate, epsilon=1e-5)
    if self._use_tpu:
      # If we use TPUs, reduce_mean runs on each chip separately and by default
      # only the loss of the first chip is reported.
      #
      # You can either:
      # - execute this if, which synchronizes the losses
      #   across the chips to obtain the full loss on all samples.
      # - or remove this section, gaining some performance and getting the
      #   loss only from the first chip.
      # compute gradients perform averaging of the loss
      adam_optimizer = tf.tpu.CrossShardOptimizer(adam_optimizer)
      tpu_sum_loss = contrib_tpu.cross_replica_sum(loss / self._tpu_num_shards)
      grads_and_vars = adam_optimizer.compute_gradients(tpu_sum_loss,
                                                        self.total_params)
      # NOTE(review): `grads` is never used in this branch and `var` is
      # immediately shadowed by the loop variable below — this unpack appears
      # to be dead code inherited from the non-TPU branch.
      grads, var = zip(*grads_and_vars)
      sum_grads = []
      sum_vars = []
      # Average each gradient across TPU replicas; None gradients (variables
      # not reached by this loss) are passed through untouched.
      for (grad, var) in grads_and_vars:
        if grad is None:
          sum_grads.append(grad)
          sum_vars.append(var)
        else:
          sum_grads.append(
              contrib_tpu.cross_replica_sum(grad) / self._tpu_num_shards)
          sum_vars.append(var)
      # calculate sum of grads
      norm_grads, _ = tf.clip_by_global_norm(sum_grads, 0.5)
      grads_and_vars = list(zip(norm_grads, sum_vars))
    else:
      grads_and_vars = adam_optimizer.compute_gradients(loss,
                                                        self.total_params)
      grads, var = zip(*grads_and_vars)
      norm_grads, _ = tf.clip_by_global_norm(grads, 0.5)
      grads_and_vars = list(zip(norm_grads, var))
    return adam_optimizer.apply_gradients(
        grads_and_vars, global_step=tf.train.get_global_step())

  def calc_normalized_advantage(self, return_tensor, value_tensor):
    """Compute General Advantage Estimation (GAE) and normalize it.

    Note that, the advantage calculation-normalization is performed for a batch
    of data.

    Args:
      return_tensor: The discounted accumulated reward (return) calculated
        for the given rollout trajectory.
      value_tensor: The value for each state for the given rollout trajectory.

    Returns:
      Returns the normalized General Advantage Estimation (GAE).
    """
    batch_advantage = return_tensor - value_tensor
    batch_advantage_std = tf.keras.backend.std(batch_advantage)
    batch_advantage_mean = tf.reduce_mean(batch_advantage)
    # 1e-8 guards against division by zero for a constant-advantage batch.
    batch_advantage_norm = (batch_advantage - batch_advantage_mean) / (
        batch_advantage_std + 1e-8)
    return batch_advantage_norm

  def create_host_call_fn(self, params):
    """Create host call function.

    `host_call` function is later called by TPU estimator to
    send some metrics to host for logging.

    Args:
      params: A dictionary of hyperparameters passed to the tf.estimator.

    Returns:
      A host call function that generates a set of tf summaries.
    """
    names_and_tensors = [
        ('Batch_Params/mean_mse_loss', self.mean_mse_loss),
        ('Batch_Params/logstd_mse_loss', self.logstd_mse_loss),
        ('Batch_Params/policy_loss', self.policy_loss),
        ('Batch_Params/mcts_enable', self.mcts_sampling_enable),
        ('Batch_Params/value_loss', self.value_loss),
        ('Batch_Params/policy_entropy', self.entropy_loss),
        ('Batch_Params/imitation_kl_divergence', self.imitation_kl_divergence),
        ('Batch_Params/clip_fraction', self.clipfrac),
        ('Batch_Params/max_ratio', tf.reduce_max(self.policy_ratio)),
        ('Batch_Params/min_ratio', tf.reduce_min(self.policy_ratio)),
        ('Batch_Params/mean_ratio', tf.reduce_mean(self.policy_ratio)),
        ('Batch_Params/approx_kl', self.approxkl),
        ('Learning_Rate/learning_rate', self._decayed_learning_rate),
        ('Learning_Rate/global_step', tf.train.get_global_step())
    ]
    return host_call_fn.build_host_call_fn_every_n_global_steps(
        params=params,
        names_and_tensors=names_and_tensors,
        n=self._iterations_per_loop)

  def compute_total_loss(self, pd_new, pd_old, value_tensor, return_tensor,
                         batch_advantage_norm,
                         policy_old_neg_logprob_tensor,
                         policy_action_tensor):
    """Defines the total loss function.

    Populates the loss/metric attributes (policy_loss, value_loss,
    entropy_loss, imitation losses, total_loss, ...) as a side effect; the
    total loss formula is selected at graph-run time by `tf.cond` depending
    on whether the batch came from MCTS or PPO sampling.

    Args:
      pd_new: The current policy distribution
        (a multivariate normal distribution). This policy distribution gets
        updated in the course of training.
      pd_old: The old policy distribution that we use during sampling the
        trajectory (a multivariate normal distribution).
      value_tensor: The values associated to the rollout trajectory.
      return_tensor: The return values computed for the rollout trajectory.
      batch_advantage_norm: The normalized advantage tensor computed for a
        batch of data. For advantage calculation, we use generalized
        advantage estimation (GAE) formula.
      policy_old_neg_logprob_tensor: The negative log probabilities from the
        policy rollouts.
      policy_action_tensor: The actions from the policy rollouts.
    """
    # Policy loss
    ppo_policy_loss_out = ppo_loss.ppo_policy_loss(
        neg_logprobs_old=policy_old_neg_logprob_tensor,
        actions=policy_action_tensor,
        advantages=batch_advantage_norm,
        dist_new=pd_new,
        mcts_sampling=self.mcts_sampling_enable)
    (self.policy_loss, self.approxkl, self.clipfrac,
     self.policy_ratio) = ppo_policy_loss_out
    # Value Loss
    if self._ppo2_enable:
      self.value_loss = ppo_loss.ppo2_value_loss(
          value_old=value_tensor,
          pred_value=self.value_new,
          returns=return_tensor)
    else:
      self.value_loss = ppo_loss.ppo1_value_loss(
          pred_value=self.value_new, returns=return_tensor)
    # MSE loss between mean and standard deviations
    self.mean_mse_loss, self.logstd_mse_loss = ppo_loss.l2_norm_policy_loss(
        policy_mean=self.mean_new,
        policy_logstd=self.logstd_new,
        mcts_mean=self.mean_old,
        mcts_logstd=self.logstd_old)
    # Imitation term: KL from the policy distribution to the MCTS
    # distribution, used when training on MCTS-generated batches.
    mcts_dist = distributions.MultiVariateNormalDiag(
        mean=self.mean_old, logstd=self.logstd_old)
    policy_dist = distributions.MultiVariateNormalDiag(
        mean=self.mean_new, logstd=self.logstd_new)
    self.imitation_kl_divergence = tf.reduce_mean(
        policy_dist.kl_divergence(mcts_dist))
    # Calculate KL divergence and entropy of new distribution
    self.kl_divergence = tf.reduce_mean(pd_new.kl_divergence(pd_old))
    self.entropy = pd_new.entropy()
    # Calculate entropy loss
    self.entropy_loss = tf.reduce_mean(self.entropy)
    # Calculate total loss
    total_loss_ppo = (self._policy_coeff * self.policy_loss) + (
        self._value_coeff * self.value_loss) - (
            self._entropy_coeff * self.entropy_loss)
    total_loss_mcts = (self._value_coeff * self.value_loss) + (
        self._mse_loss_coeff *
        (self.imitation_kl_divergence + self.entropy_loss))
    self.total_loss = tf.cond(
        tf.equal(self.mcts_sampling_enable,
                 True), lambda: total_loss_mcts, lambda: total_loss_ppo)

  def model_fn(self, features, labels, mode, params):
    """The implementation of PPO algorithm.

    Args:
      features: dict from string to tensor with shape
        'state_tensor': [BATCH_SIZE, env.state_space]
      labels: dict from string to tensor with shape
        'action_tensor': [BATCH_SIZE, self._env_action_space]
        'advantage_tensor': [BATCH_SIZE]
        'returns_tensor': [BATCH_SIZE]
      mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only).
      params: (Ignored; needed for compat with TPUEstimator).

    Returns:
      tf.estimator.EstimatorSpec with props.
      mode: same as mode arg.
      predictions: dict of tensors
        'mean': [BATCH_SIZE, self._env_action_space]
        'logstd': [BATCH_SIZE, self._env_action_space]
        'value': [BATCH_SIZE]
        'action': [BATCH_SIZE, self._env_action_space]
        'neg_logprob': [BATCH_SIZE, self._env_action_space]
      loss: a single value tensor.
      train_op: train op eval_metric_ops return dict of tensors.
    """
    # Policy network
    network_out = self.model_inference_fn_ppo(features['mcts_features'], 'new')
    self.value_new = network_out[0]
    self.logstd_new = network_out[1]
    self.mean_new = network_out[2]
    self.global_step = tf.train.get_or_create_global_step()
    # Sample an action
    pd_new = distributions.MultiVariateNormalDiag(
        mean=self.mean_new, logstd=self.logstd_new)
    action_sample = pd_new.sample()
    action_sample_neg_logprob = pd_new.negative_log_prob(action_sample)
    # Used during TF estimator prediction
    if mode == tf.estimator.ModeKeys.PREDICT:
      predictions = {
          'mean': self.mean_new,
          'logstd': self.logstd_new,
          'value': self.value_new,
          'action': action_sample,
          'neg_logprob': action_sample_neg_logprob
      }
      pred_estimator = tf.estimator.tpu.TPUEstimatorSpec(
          mode,
          predictions=predictions,
          export_outputs={
              'ppo_inference':
                  tf.estimator.export.PredictOutput({
                      'mean': self.mean_new,
                      'logstd': self.logstd_new,
                      'value': self.value_new,
                      'action': action_sample,
                      'neg_logprob': action_sample_neg_logprob
                  })
          })
      return pred_estimator.as_estimator_spec()
    # Placeholder
    # True when the whole batch was produced by MCTS sampling.
    self.mcts_sampling_enable = tf.reduce_all(labels['mcts_enable_tensor'])
    self.mean_old = labels['mean_tensor']
    self.logstd_old = labels['logstd_tensor']
    pd_old = distributions.MultiVariateNormalDiag(
        mean=self.mean_old, logstd=self.logstd_old)
    batch_advantage_norm = self.calc_normalized_advantage(
        return_tensor=labels['policy_return_tensor'],
        value_tensor=labels['policy_value_tensor'])
    self.compute_total_loss(pd_new, pd_old, labels['value_tensor'],
                            labels['return_tensor'], batch_advantage_norm,
                            labels['policy_old_neg_logprob_tensor'],
                            labels['policy_action_tensor'])
    # Update learning rate
    self.update_learning_rate()
    # Build training operation
    self.total_params = tf.trainable_variables(scope='newpolicy')
    train_ops = self.build_training_op(self.total_loss)
    host_call = self.create_host_call_fn(params)
    if mode != tf.estimator.ModeKeys.TRAIN:
      raise ValueError('Estimator mode should be train at this point.')
    # NOTE(review): the raise above guarantees TRAIN mode here, so this
    # condition is always true; kept for defensive clarity.
    if mode == tf.estimator.ModeKeys.TRAIN:
      # Setup fine tune scaffold
      # The scaffold here is used to restore the weights from _warmstart_file.
      # If _warmstart_file is None, the training starts from the beginning.
      if self._warmstart_file:
        logging.info('Warmstart')
        def tpu_scaffold():
          # restore all the variables
          tf.init_from_checkpoint(self._warmstart_file,
                                  {'newpolicy/': 'newpolicy/'})
          return tf.train.Scaffold()
        scaffold_fn = tpu_scaffold
      else:
        scaffold_fn = None
      tpu_estimator_spec = tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=self.total_loss,
          train_op=train_ops,
          host_call=host_call,
          scaffold_fn=scaffold_fn)
      if self._use_tpu:
        return tpu_estimator_spec
      else:
        # Off-TPU (e.g. CPU debugging), convert to a plain EstimatorSpec.
        return tpu_estimator_spec.as_estimator_spec()
| [
"polish.utils.tf_layers.fc",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.init_from_checkpoint",
"absl.logging.info",
"tensorflow.compat.v1.estimator.export.PredictOutput",
"polish.utils.distributions.MultiVariateNormalDiag",
"absl.logging.set_verbosity",
"... | [((1112, 1147), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (1133, 1147), False, 'from absl import logging\n'), ((7245, 7272), 'tensorflow.compat.v1.layers.flatten', 'tf.layers.flatten', (['features'], {}), '(features)\n', (7262, 7272), True, 'import tensorflow.compat.v1 as tf\n'), ((9048, 9074), 'tensorflow.compat.v1.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (9072, 9074), True, 'import tensorflow.compat.v1 as tf\n'), ((9536, 9577), 'tensorflow.compat.v1.cast', 'tf.cast', (['self._mcts_global_step', 'tf.int64'], {}), '(self._mcts_global_step, tf.int64)\n', (9543, 9577), True, 'import tensorflow.compat.v1 as tf\n'), ((9862, 9906), 'tensorflow.compat.v1.cast', 'tf.cast', (['self._current_global_step', 'tf.int64'], {}), '(self._current_global_step, tf.int64)\n', (9869, 9906), True, 'import tensorflow.compat.v1 as tf\n'), ((10098, 10125), 'tensorflow.compat.v1.cast', 'tf.cast', (['update', 'tf.float32'], {}), '(update, tf.float32)\n', (10105, 10125), True, 'import tensorflow.compat.v1 as tf\n'), ((10145, 10178), 'tensorflow.compat.v1.cast', 'tf.cast', (['current_frac', 'tf.float32'], {}), '(current_frac, tf.float32)\n', (10152, 10178), True, 'import tensorflow.compat.v1 as tf\n'), ((10870, 10955), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self._decayed_learning_rate', 'epsilon': '(1e-05)'}), '(learning_rate=self._decayed_learning_rate, epsilon=1e-05\n )\n', (10892, 10955), True, 'import tensorflow.compat.v1 as tf\n'), ((13246, 13283), 'tensorflow.compat.v1.keras.backend.std', 'tf.keras.backend.std', (['batch_advantage'], {}), '(batch_advantage)\n', (13266, 13283), True, 'import tensorflow.compat.v1 as tf\n'), ((13311, 13342), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['batch_advantage'], {}), '(batch_advantage)\n', (13325, 13342), True, 'import tensorflow.compat.v1 as tf\n'), ((14775, 14912), 
'polish.utils.host_call_fn.build_host_call_fn_every_n_global_steps', 'host_call_fn.build_host_call_fn_every_n_global_steps', ([], {'params': 'params', 'names_and_tensors': 'names_and_tensors', 'n': 'self._iterations_per_loop'}), '(params=params,\n names_and_tensors=names_and_tensors, n=self._iterations_per_loop)\n', (14827, 14912), False, 'from polish.utils import host_call_fn\n'), ((16062, 16264), 'polish.ppo.ppo_loss.ppo_policy_loss', 'ppo_loss.ppo_policy_loss', ([], {'neg_logprobs_old': 'policy_old_neg_logprob_tensor', 'actions': 'policy_action_tensor', 'advantages': 'batch_advantage_norm', 'dist_new': 'pd_new', 'mcts_sampling': 'self.mcts_sampling_enable'}), '(neg_logprobs_old=policy_old_neg_logprob_tensor,\n actions=policy_action_tensor, advantages=batch_advantage_norm, dist_new\n =pd_new, mcts_sampling=self.mcts_sampling_enable)\n', (16086, 16264), False, 'from polish.ppo import ppo_loss\n'), ((16815, 16960), 'polish.ppo.ppo_loss.l2_norm_policy_loss', 'ppo_loss.l2_norm_policy_loss', ([], {'policy_mean': 'self.mean_new', 'policy_logstd': 'self.logstd_new', 'mcts_mean': 'self.mean_old', 'mcts_logstd': 'self.logstd_old'}), '(policy_mean=self.mean_new, policy_logstd=self.\n logstd_new, mcts_mean=self.mean_old, mcts_logstd=self.logstd_old)\n', (16843, 16960), False, 'from polish.ppo import ppo_loss\n'), ((17006, 17091), 'polish.utils.distributions.MultiVariateNormalDiag', 'distributions.MultiVariateNormalDiag', ([], {'mean': 'self.mean_old', 'logstd': 'self.logstd_old'}), '(mean=self.mean_old, logstd=self.logstd_old\n )\n', (17042, 17091), False, 'from polish.utils import distributions\n'), ((17114, 17199), 'polish.utils.distributions.MultiVariateNormalDiag', 'distributions.MultiVariateNormalDiag', ([], {'mean': 'self.mean_new', 'logstd': 'self.logstd_new'}), '(mean=self.mean_new, logstd=self.logstd_new\n )\n', (17150, 17199), False, 'from polish.utils import distributions\n'), ((17523, 17551), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', 
(['self.entropy'], {}), '(self.entropy)\n', (17537, 17551), True, 'import tensorflow.compat.v1 as tf\n'), ((19374, 19410), 'tensorflow.compat.v1.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (19408, 19410), True, 'import tensorflow.compat.v1 as tf\n'), ((19447, 19532), 'polish.utils.distributions.MultiVariateNormalDiag', 'distributions.MultiVariateNormalDiag', ([], {'mean': 'self.mean_new', 'logstd': 'self.logstd_new'}), '(mean=self.mean_new, logstd=self.logstd_new\n )\n', (19483, 19532), False, 'from polish.utils import distributions\n'), ((20560, 20603), 'tensorflow.compat.v1.reduce_all', 'tf.reduce_all', (["labels['mcts_enable_tensor']"], {}), "(labels['mcts_enable_tensor'])\n", (20573, 20603), True, 'import tensorflow.compat.v1 as tf\n'), ((20706, 20791), 'polish.utils.distributions.MultiVariateNormalDiag', 'distributions.MultiVariateNormalDiag', ([], {'mean': 'self.mean_old', 'logstd': 'self.logstd_old'}), '(mean=self.mean_old, logstd=self.logstd_old\n )\n', (20742, 20791), False, 'from polish.utils import distributions\n'), ((21350, 21391), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {'scope': '"""newpolicy"""'}), "(scope='newpolicy')\n", (21372, 21391), True, 'import tensorflow.compat.v1 as tf\n'), ((7282, 7339), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (["(prefix + 'policy')"], {'reuse': 'tf.AUTO_REUSE'}), "(prefix + 'policy', reuse=tf.AUTO_REUSE)\n", (7299, 7339), True, 'import tensorflow.compat.v1 as tf\n'), ((7788, 7911), 'polish.utils.tf_layers.fc', 'tf_layers.fc', ([], {'tensor_in': 'policy_2', 'num_hidden': 'self._env_action_space', 'scope_name': '"""/mean"""', 'init_scale': '(0.01)', 'init_bias': '(0.0)'}), "(tensor_in=policy_2, num_hidden=self._env_action_space,\n scope_name='/mean', init_scale=0.01, init_bias=0.0)\n", (7800, 7911), False, 'from polish.utils import tf_layers\n'), ((8207, 8235), 'tensorflow.compat.v1.multiply', 'tf.multiply', (['logstd_var', 
'(1.0)'], {}), '(logstd_var, 1.0)\n', (8218, 8235), True, 'import tensorflow.compat.v1 as tf\n'), ((9726, 9767), 'tensorflow.compat.v1.equal', 'tf.equal', (['self.mcts_sampling_enable', '(True)'], {}), '(self.mcts_sampling_enable, True)\n', (9734, 9767), True, 'import tensorflow.compat.v1 as tf\n'), ((10333, 10405), 'tensorflow.compat.v1.not_equal', 'tf.not_equal', (['self.mcts_sampling_enable', 'self.last_iteration_mcts_enable'], {}), '(self.mcts_sampling_enable, self.last_iteration_mcts_enable)\n', (10345, 10405), True, 'import tensorflow.compat.v1 as tf\n'), ((11465, 11507), 'tensorflow.compat.v1.tpu.CrossShardOptimizer', 'tf.tpu.CrossShardOptimizer', (['adam_optimizer'], {}), '(adam_optimizer)\n', (11491, 11507), True, 'import tensorflow.compat.v1 as tf\n'), ((11530, 11588), 'tensorflow.contrib.tpu.cross_replica_sum', 'contrib_tpu.cross_replica_sum', (['(loss / self._tpu_num_shards)'], {}), '(loss / self._tpu_num_shards)\n', (11559, 11588), True, 'from tensorflow.contrib import tpu as contrib_tpu\n'), ((12146, 12184), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['sum_grads', '(0.5)'], {}), '(sum_grads, 0.5)\n', (12168, 12184), True, 'import tensorflow.compat.v1 as tf\n'), ((12449, 12483), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', '(0.5)'], {}), '(grads, 0.5)\n', (12471, 12483), True, 'import tensorflow.compat.v1 as tf\n'), ((16465, 16567), 'polish.ppo.ppo_loss.ppo2_value_loss', 'ppo_loss.ppo2_value_loss', ([], {'value_old': 'value_tensor', 'pred_value': 'self.value_new', 'returns': 'return_tensor'}), '(value_old=value_tensor, pred_value=self.value_new,\n returns=return_tensor)\n', (16489, 16567), False, 'from polish.ppo import ppo_loss\n'), ((16629, 16703), 'polish.ppo.ppo_loss.ppo1_value_loss', 'ppo_loss.ppo1_value_loss', ([], {'pred_value': 'self.value_new', 'returns': 'return_tensor'}), '(pred_value=self.value_new, returns=return_tensor)\n', (16653, 16703), False, 'from polish.ppo import 
ppo_loss\n'), ((17942, 17983), 'tensorflow.compat.v1.equal', 'tf.equal', (['self.mcts_sampling_enable', '(True)'], {}), '(self.mcts_sampling_enable, True)\n', (17950, 17983), True, 'import tensorflow.compat.v1 as tf\n'), ((22239, 22376), 'tensorflow.compat.v1.estimator.tpu.TPUEstimatorSpec', 'tf.estimator.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'self.total_loss', 'train_op': 'train_ops', 'host_call': 'host_call', 'scaffold_fn': 'scaffold_fn'}), '(mode=mode, loss=self.total_loss, train_op\n =train_ops, host_call=host_call, scaffold_fn=scaffold_fn)\n', (22272, 22376), True, 'import tensorflow.compat.v1 as tf\n'), ((8678, 8744), 'polish.utils.tf_layers.fc', 'tf_layers.fc', ([], {'tensor_in': 'value_2', 'num_hidden': '(1)', 'scope_name': '"""/value"""'}), "(tensor_in=value_2, num_hidden=1, scope_name='/value')\n", (8690, 8744), False, 'from polish.utils import tf_layers\n'), ((12614, 12640), 'tensorflow.compat.v1.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (12638, 12640), True, 'import tensorflow.compat.v1 as tf\n'), ((14393, 14425), 'tensorflow.compat.v1.reduce_max', 'tf.reduce_max', (['self.policy_ratio'], {}), '(self.policy_ratio)\n', (14406, 14425), True, 'import tensorflow.compat.v1 as tf\n'), ((14463, 14495), 'tensorflow.compat.v1.reduce_min', 'tf.reduce_min', (['self.policy_ratio'], {}), '(self.policy_ratio)\n', (14476, 14495), True, 'import tensorflow.compat.v1 as tf\n'), ((14534, 14567), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['self.policy_ratio'], {}), '(self.policy_ratio)\n', (14548, 14567), True, 'import tensorflow.compat.v1 as tf\n'), ((14729, 14755), 'tensorflow.compat.v1.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (14753, 14755), True, 'import tensorflow.compat.v1 as tf\n'), ((21888, 21913), 'absl.logging.info', 'logging.info', (['"""Warmstart"""'], {}), "('Warmstart')\n", (21900, 21913), False, 'from absl import logging\n'), ((8097, 8119), 
'tensorflow.compat.v1.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (8117, 8119), True, 'import tensorflow.compat.v1 as tf\n'), ((9922, 9948), 'tensorflow.compat.v1.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (9946, 9948), True, 'import tensorflow.compat.v1 as tf\n'), ((21990, 22065), 'tensorflow.compat.v1.init_from_checkpoint', 'tf.init_from_checkpoint', (['self._warmstart_file', "{'newpolicy/': 'newpolicy/'}"], {}), "(self._warmstart_file, {'newpolicy/': 'newpolicy/'})\n", (22013, 22065), True, 'import tensorflow.compat.v1 as tf\n'), ((22117, 22136), 'tensorflow.compat.v1.train.Scaffold', 'tf.train.Scaffold', ([], {}), '()\n', (22134, 22136), True, 'import tensorflow.compat.v1 as tf\n'), ((7545, 7555), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7552, 7555), True, 'import numpy as np\n'), ((7762, 7772), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7769, 7772), True, 'import numpy as np\n'), ((8438, 8448), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8445, 8448), True, 'import numpy as np\n'), ((8651, 8661), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8658, 8661), True, 'import numpy as np\n'), ((20140, 20326), 'tensorflow.compat.v1.estimator.export.PredictOutput', 'tf.estimator.export.PredictOutput', (["{'mean': self.mean_new, 'logstd': self.logstd_new, 'value': self.value_new,\n 'action': action_sample, 'neg_logprob': action_sample_neg_logprob}"], {}), "({'mean': self.mean_new, 'logstd': self.\n logstd_new, 'value': self.value_new, 'action': action_sample,\n 'neg_logprob': action_sample_neg_logprob})\n", (20173, 20326), True, 'import tensorflow.compat.v1 as tf\n'), ((12002, 12037), 'tensorflow.contrib.tpu.cross_replica_sum', 'contrib_tpu.cross_replica_sum', (['grad'], {}), '(grad)\n', (12031, 12037), True, 'from tensorflow.contrib import tpu as contrib_tpu\n')] |
# coding=utf-8
# Copyright (c) 2021-present, Data-driven Intelligent System Research Center (DIRECT), National Institute of Information and Communications Technology (NICT). (Modifications for BERTAC)
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
""" Modified from examples/run_glue.py in the original HuggingFace Transformers for caching feature files before training/testing.
Other model settings except for ALBERT and RoBERTa have been commented out or deleted for simplicity.
The following libraries/functions/classes have been added/modified:
- Libraries: torchtext, cnn_utils, and train_utils are additionaly imported
- Functions/classes:
class TTDataset(torchtext.data.Dataset):
def load_cnn_model_and_vocab():
def load_and_cache_examples(): the main body of caching
"""
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
# BertConfig,
# BertForSequenceClassification,
# BertTokenizer,
# DistilBertConfig,
# DistilBertForSequenceClassification,
# DistilBertTokenizer,
# FlaubertConfig,
# FlaubertForSequenceClassification,
# FlaubertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
# XLMConfig,
# XLMForSequenceClassification,
# XLMRobertaConfig,
# XLMRobertaForSequenceClassification,
# XLMRobertaTokenizer,
# XLMTokenizer,
# XLNetConfig,
# XLNetForSequenceClassification,
# XLNetTokenizer,
# get_linear_schedule_with_warmup,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## added by <NAME>
import torchtext
import cnn_utils
import train_utils
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
#BertConfig,
#XLNetConfig,
#XLMConfig,
#RobertaConfig,
#DistilBertConfig,
AlbertConfig,
#XLMRobertaConfig,
#FlaubertConfig,
)
),
(),
)
MODEL_CLASSES = {
#"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
#"xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
#"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
#"distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
"albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
#"xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
#"flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
## added by <NAME>
class TTDataset(torchtext.data.Dataset):
    '''Dummy single-example dataset used only so build_vocab can consume a
    pre-collected token list.'''
    def __init__(self, words, fields):
        data_fields = [('text', fields['text'])]
        example = torchtext.data.Example.fromlist((words,), data_fields)
        super().__init__([example], data_fields)
## added by <NAME>
def load_cnn_model_and_vocab(args, cnn_file, words):
    """Build a vocabulary over `words` and load the pre-trained CNN model.

    Args:
        args: parsed CLI namespace; must provide `emb_file` and `min_freq`.
        cnn_file: path to the pre-trained CNN checkpoint.
        words: token list the vocabulary is built from.

    Returns:
        (model, stoi): the loaded CNN model and the string-to-index mapping
        of the text vocabulary actually stored with the checkpoint.
    """
    assert args.emb_file and args.min_freq
    fields = cnn_utils.get_fields()
    # Build vocabularies from `words` (wrapped in a dummy one-example dataset).
    train_utils.build_vocab(args, fields, TTDataset(words, fields), [])
    # Load the pre-trained generator model. The checkpoint's own fields carry
    # the vocab used at training time, so return that stoi, not the fresh one.
    # (The previous `vocab = fields['text'].vocab` local was never used.)
    model, pre_fields = train_utils.load_cnn_model(args, cnn_file, fields)
    return model, pre_fields['text'].vocab.stoi
def set_seed(args):
    """Seed Python, NumPy, and PyTorch RNGs (CUDA too when GPUs are in use)."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
# modified by <NAME>
# - Converting input examples to cached examples
# - cnn_stoi: vocab.stoi for cnn models
def load_and_cache_examples(args, task, filename, tokenizer, cnn_stoi, evaluate=False, output_examples=False):
    """Convert the GLUE split in `filename` to model features and cache them.

    This is cache-only by design (see the module docstring): it writes the
    feature file to disk and returns nothing; training/eval scripts reload it.

    Args:
        args: parsed CLI namespace (data_dir, feat_dir, task_name, ...).
        task: GLUE task key into `processors` / `output_modes`.
        filename: split file name (e.g. "train.tsv"); only its stem is used.
        tokenizer: pretrained transformers tokenizer.
        cnn_stoi: string-to-index vocab of the CNN model.
        evaluate: True for dev/test splits (affects distributed barriers).
        output_examples: unused; kept for interface compatibility.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    fstem = list(filter(None, filename.split("/"))).pop()
    fstem = fstem.split(".")[0]
    data_type = args.task_name
    # Fall back to the current directory when no feature directory was given.
    feat_dir = args.feat_dir if args.feat_dir is not None else "."
    cached_features_file = os.path.join(
        feat_dir,  # BUG FIX: was `args.feat_dir`, which ignored the "." fallback above
        data_type,
        "cached_{}_{}_{}_{}_{}".format(
            fstem,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            args.cnn_stem,
            list(filter(None, args.cnn_model.split("_"))).pop(),
            str(args.max_seq_length),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s with fstem %s", args.data_dir, fstem)
        logger.info("FSTEM: {}".format(fstem))
        label_list = processor.get_labels()
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        if fstem == 'train':
            examples = processor.get_train_examples(args.data_dir)
        elif fstem in ["dev", "dev_matched"]:
            examples = processor.get_dev_examples(args.data_dir)
        elif fstem == 'dev_mismatched':
            examples = processor.get_dev_mm_examples(args.data_dir)
        elif fstem == 'test_mismatched':
            examples = processor.get_test_mm_examples(args.data_dir)
        elif fstem in ["test", "test_matched"]:
            examples = processor.get_test_examples(args.data_dir)
        else:
            # Previously an unrecognized stem fell through to a NameError on
            # `examples`; fail with an explicit message instead.
            raise ValueError("Unrecognized split stem: {}".format(fstem))
        features = convert_examples_to_features(
            examples,
            tokenizer,
            cnn_stoi=cnn_stoi,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
            pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
def main():
    """Parse CLI arguments, load the tokenizer and CNN model, and cache GLUE features.

    This entry point does no training: it only builds and caches feature files
    for the dev (and dev_matched/mismatched) and train splits via
    `load_and_cache_examples`.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default="cola",
        type=str,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--feat_dir",
        default="",
        type=str,
        help="Where do you want to store the processed data whose features were extracted from the input data",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    # added by <NAME>
    parser.add_argument(
        "--prep_vocab_file",
        default=None,
        type=str,
        required=True,
        help="The preprocessed_vocab_file. see make_glue_cnn_vocab.py",
    )
    parser.add_argument(
        "--emb_file",
        default=None,
        type=str,
        required=True,
        help="The embedding vector file used for CNN",
    )
    parser.add_argument(
        "--cnn_model",
        default=None,
        type=str,
        required=True,
        help="The CNN model file name",
    )
    parser.add_argument(
        "--cnn_stem",
        default="enwiki",
        type=str,
        help="file stem for CNN models for caching",
    )
    parser.add_argument(
        "--min_freq",
        default=5,
        type=int,
        help="min freq. for unknown words",
    )
    parser.add_argument(
        "--emb_dim",
        default=None,
        type=int,
        help="dim for representation of fastText",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    # Validate that all CNN-related inputs actually exist before doing any work.
    assert args.prep_vocab_file is not None
    assert args.cnn_model is not None
    assert args.emb_dim is not None
    assert args.emb_file is not None
    if (not os.path.exists(args.prep_vocab_file)):
        raise ValueError(
            "prep_vocab_file ({}) does not exist. Check the --prep_vocab_file option.".format( args.prep_vocab_file) )
    if (not os.path.exists(args.cnn_model)):
        raise ValueError(
            "cnn_model ({}) does not exist. Check the --cnn_model option.".format( args.cnn_model) )
    if (not os.path.exists(args.emb_file)):
        raise ValueError(
            "emb_file ({}) does not exist. Check the --emb_file option.".format( args.emb_file) )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # added by <NAME>
    # prep_vocab_file: see run_preprocessor.sh
    prep_tokens = torch.load(args.prep_vocab_file)
    all_tokens = prep_tokens['tokens']
    # Here, loading CNN models
    cnn_model, cnn_stoi = load_cnn_model_and_vocab(args, args.cnn_model, all_tokens)
    # CNN output width = (#filter widths) * filter size; forwarded to the config.
    cnn_dim = len(cnn_model.args.filter_widths) * cnn_model.args.filter_size
    args.cnn_dim = cnn_dim
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    # CONFIG
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=None,
    )
    config.num_of_TIERs = 3
    config.cnn_dim = args.cnn_dim
    config.emb_dim = args.emb_dim
    config.cnn_model = args.cnn_model
    config.cnn_stem = args.cnn_stem
    # TOKENIZER
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=None,
    )
    # MODEL
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    cnn_model.to(args.device)
    train_file = "train.tsv"
    # MNLI has matched/mismatched dev splits; every other task has a single dev file.
    dev_file = ["dev_matched.tsv","dev_mismatched.tsv"] if args.task_name == "mnli" or args.task_name == "mnlim" else ["dev.tsv", ]
    #test_file = ["test_matched.tsv","test_mismatched.tsv"] if args.task_name == "mnli" or args.task_name == "mnlim" else ["test.tsv", ]
    ###################
    #### MAIN
    ###################
    # Cache the dev split(s) first, then the train split. Each call writes its
    # feature file to disk; nothing is returned.
    logger.info("==== {} ====".format(train_file))
    logger.info("==== {} ====".format(dev_file))
    for i in range(len(dev_file)):
        if os.path.isfile(os.path.join(args.data_dir, dev_file[i])):
            logger.info("==== {} ====".format(dev_file[i]))
            load_and_cache_examples(args, args.task_name, dev_file[i], tokenizer, cnn_stoi, evaluate=True)
    if os.path.isfile(os.path.join(args.data_dir, train_file)):
        logger.info("==== {} ====".format(train_file))
        load_and_cache_examples(args, args.task_name, train_file, tokenizer, cnn_stoi, evaluate=False)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"ptvsd.enable_attach",
"train_utils.load_cnn_model",
"torch.cuda.device_count",
"torch.device",
"transformers.glue_processors.keys",
"os.path.join",
"torch.load",
"os.path.exists",
"random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
... | [((3077, 3104), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3094, 3104), False, 'import logging\n'), ((4617, 4639), 'cnn_utils.get_fields', 'cnn_utils.get_fields', ([], {}), '()\n', (4637, 4639), False, 'import cnn_utils\n'), ((4859, 4909), 'train_utils.load_cnn_model', 'train_utils.load_cnn_model', (['args', 'cnn_file', 'fields'], {}), '(args, cnn_file, fields)\n', (4885, 4909), False, 'import train_utils\n'), ((4985, 5007), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (4996, 5007), False, 'import random\n'), ((5012, 5037), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5026, 5037), True, 'import numpy as np\n'), ((5042, 5070), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5059, 5070), False, 'import torch\n'), ((8281, 8306), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8304, 8306), False, 'import argparse\n'), ((13858, 14053), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if args.local_rank in [-1, 0] else logging.WARN)'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else\n logging.WARN)\n", (13877, 14053), False, 'import logging\n'), ((14782, 14814), 'torch.load', 'torch.load', (['args.prep_vocab_file'], {}), '(args.prep_vocab_file)\n', (14792, 14814), False, 'import torch\n'), ((5102, 5139), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (5128, 5139), False, 'import torch\n'), ((5427, 5454), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (5452, 5454), False, 'import torch\n'), ((6204, 6240), 'os.path.exists', 'os.path.exists', (['cached_features_file'], {}), '(cached_features_file)\n', 
(6218, 6240), False, 'import os\n'), ((6372, 6404), 'torch.load', 'torch.load', (['cached_features_file'], {}), '(cached_features_file)\n', (6382, 6404), False, 'import torch\n'), ((8112, 8139), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (8137, 8139), False, 'import torch\n'), ((12367, 12403), 'os.path.exists', 'os.path.exists', (['args.prep_vocab_file'], {}), '(args.prep_vocab_file)\n', (12381, 12403), False, 'import os\n'), ((12568, 12598), 'os.path.exists', 'os.path.exists', (['args.cnn_model'], {}), '(args.cnn_model)\n', (12582, 12598), False, 'import os\n'), ((12741, 12770), 'os.path.exists', 'os.path.exists', (['args.emb_file'], {}), '(args.emb_file)\n', (12755, 12770), False, 'import os\n'), ((13169, 13258), 'ptvsd.enable_attach', 'ptvsd.enable_attach', ([], {'address': '(args.server_ip, args.server_port)', 'redirect_output': '(True)'}), '(address=(args.server_ip, args.server_port),\n redirect_output=True)\n', (13188, 13258), False, 'import ptvsd\n'), ((13263, 13286), 'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (13284, 13286), False, 'import ptvsd\n'), ((13499, 13524), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (13522, 13524), False, 'import torch\n'), ((13630, 13668), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (13651, 13668), False, 'import torch\n'), ((13686, 13723), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (13698, 13723), False, 'import torch\n'), ((13732, 13784), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (13768, 13784), False, 'import torch\n'), ((15166, 15193), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (15191, 15193), False, 'import torch\n'), ((16267, 16294), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), 
'()\n', (16292, 16294), False, 'import torch\n'), ((17192, 17231), 'os.path.join', 'os.path.join', (['args.data_dir', 'train_file'], {}), '(args.data_dir, train_file)\n', (17204, 17231), False, 'import os\n'), ((4374, 4422), 'torchtext.data.Example.fromlist', 'torchtext.data.Example.fromlist', (['ex', 'data_fields'], {}), '(ex, data_fields)\n', (4405, 4422), False, 'import torchtext\n'), ((8014, 8056), 'torch.save', 'torch.save', (['features', 'cached_features_file'], {}), '(features, cached_features_file)\n', (8024, 8056), False, 'import torch\n'), ((16964, 17004), 'os.path.join', 'os.path.join', (['args.data_dir', 'dev_file[i]'], {}), '(args.data_dir, dev_file[i])\n', (16976, 17004), False, 'import os\n'), ((9156, 9173), 'transformers.glue_processors.keys', 'processors.keys', ([], {}), '()\n', (9171, 9173), True, 'from transformers import glue_processors as processors\n'), ((13419, 13444), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13442, 13444), False, 'import torch\n')] |
from pathlib import Path
import numpy as np
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from elastic import ES
# Application and shared resources, created once at import time.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# BUG FIX: a second `app = FastAPI()` previously re-created the app here,
# silently discarding the CORS middleware registered above; removed.
es = ES()
data_path = Path("/data")
image_ids = np.load(data_path / "ids.npy")
def id_to_url(image_id):
    """Build the full-resolution IIIF URL for a catalogue image id."""
    base = "https://iiif.wellcomecollection.org/image"
    return f"{base}/{image_id}.jpg/full/760,/0/default.jpg"
@app.get("/similar-images/approximate")
def approximate(n_classifiers: int, n_clusters: int, image_id: str = None, n: int = 10):
    """Return `n` approximate (LSH) neighbours of `image_id`.

    Falls back to a random catalogue image when no id is supplied.
    """
    if not image_id:
        image_id = np.random.choice(image_ids)
    neighbours = es.lsh_query(f"{n_classifiers}-{n_clusters}", image_id, n)
    return {
        "query_id": image_id,
        "query_image_url": id_to_url(image_id),
        "similar_image_ids": neighbours,
        "similar_image_urls": [id_to_url(neighbour) for neighbour in neighbours],
    }
@app.get("/similar-images/exact")
def exact(image_id: str = None, n: int = 10):
    """Return `n` exact nearest neighbours of `image_id`.

    Falls back to a random catalogue image when no id is supplied.
    """
    if not image_id:
        image_id = np.random.choice(image_ids)
    neighbours = es.exact_query(image_id, n)
    return {
        "query_id": image_id,
        "query_image_url": id_to_url(image_id),
        "similar_image_ids": neighbours,
        "similar_image_urls": [id_to_url(neighbour) for neighbour in neighbours],
    }
@app.post("/assessment")
async def post_assessment(assessment: dict):
    """Store a human relevance assessment document and echo it back."""
    es.index_document(index_name="assessment", body=assessment)
    return assessment
@app.get("/data_for_interface")
def data_for_interface():
    """Pick a random query image and two distinct LSH indexes; return both
    result sets so the interface can present an A/B comparison."""
    query_id = np.random.choice(image_ids)
    index_a = random_index_name()
    index_b = random_index_name()
    while index_b == index_a:  # ensure the two indexes differ
        index_b = random_index_name()
    results_a = es.lsh_query(index_a, query_id, 6)
    results_b = es.lsh_query(index_b, query_id, 6)
    return {
        "query_id": query_id,
        "index_a": index_a,
        "index_b": index_b,
        "similar_image_ids_a": results_a,
        "similar_image_ids_b": results_b,
    }
def random_index_name():
    """Pick a random '<n_classifiers>-<n_clusters>' LSH index name."""
    n_classifiers = np.random.choice([32, 64, 128, 256, 512])
    n_clusters = np.random.choice([8, 16, 32, 64, 128, 256])
    return f"{n_classifiers}-{n_clusters}"
| [
"numpy.load",
"elastic.ES",
"pathlib.Path",
"numpy.random.choice",
"fastapi.FastAPI"
] | [((155, 164), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (162, 164), False, 'from fastapi import FastAPI\n'), ((317, 321), 'elastic.ES', 'ES', ([], {}), '()\n', (319, 321), False, 'from elastic import ES\n'), ((328, 337), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (335, 337), False, 'from fastapi import FastAPI\n'), ((350, 363), 'pathlib.Path', 'Path', (['"""/data"""'], {}), "('/data')\n", (354, 363), False, 'from pathlib import Path\n'), ((376, 406), 'numpy.load', 'np.load', (["(data_path / 'ids.npy')"], {}), "(data_path / 'ids.npy')\n", (383, 406), True, 'import numpy as np\n'), ((1825, 1852), 'numpy.random.choice', 'np.random.choice', (['image_ids'], {}), '(image_ids)\n', (1841, 1852), True, 'import numpy as np\n'), ((2368, 2409), 'numpy.random.choice', 'np.random.choice', (['[32, 64, 128, 256, 512]'], {}), '([32, 64, 128, 256, 512])\n', (2384, 2409), True, 'import numpy as np\n'), ((2427, 2470), 'numpy.random.choice', 'np.random.choice', (['[8, 16, 32, 64, 128, 256]'], {}), '([8, 16, 32, 64, 128, 256])\n', (2443, 2470), True, 'import numpy as np\n'), ((687, 714), 'numpy.random.choice', 'np.random.choice', (['image_ids'], {}), '(image_ids)\n', (703, 714), True, 'import numpy as np\n'), ((1212, 1239), 'numpy.random.choice', 'np.random.choice', (['image_ids'], {}), '(image_ids)\n', (1228, 1239), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
import math
import random
import os
import gym
# Hyper Parameters
STATE_DIM = 4  # size of the state vector fed to both networks
ACTION_DIM = 2  # number of discrete actions the policy chooses between
STEP = 2000  # number of outer training iterations in main()
SAMPLE_NUMS = 30  # max transitions collected per roll_out() call
class ActorNetwork(nn.Module):
    """Two-hidden-layer MLP policy head.

    Maps a state vector to log-probabilities over the discrete actions
    (log-softmax output), so callers exponentiate to obtain probabilities.
    """

    def __init__(self, input_size, hidden_size, action_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, action_size)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return F.log_softmax(self.fc3(hidden), dim=-1)
class ValueNetwork(nn.Module):
    """Two-hidden-layer MLP state-value head.

    Maps a state vector to an unbounded scalar value estimate.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def roll_out(actor_network, task, sample_nums, value_network, init_state):
    """Collect up to `sample_nums` transitions from `task` under the policy.

    Args:
        actor_network: policy net returning log-probabilities over actions.
        task: gym-style env with `step(action)` and `reset()`.
        sample_nums: max number of transitions to collect.
        value_network: critic used to bootstrap the final return when the
            episode did not terminate inside this rollout.
        init_state: state to resume the episode from.

    Returns:
        (states, actions, rewards, final_r, state) where `actions` are
        one-hot lists, `final_r` is 0 on episode end (else the critic's
        estimate of the last state), and `state` is where to resume next.
    """
    states = []
    actions = []
    rewards = []
    is_done = False
    final_r = 0
    state = init_state
    # BUG FIX: final_state was previously only assigned inside the loop, so
    # sample_nums <= 0 crashed with a NameError in the bootstrap below.
    final_state = init_state
    for j in range(sample_nums):
        states.append(state)
        log_softmax_action = actor_network(Variable(torch.Tensor([state])))
        softmax_action = torch.exp(log_softmax_action)
        # Sample an action from the policy's probability distribution.
        action = np.random.choice(ACTION_DIM, p=softmax_action.data.numpy()[0])
        one_hot_action = [int(k == action) for k in range(ACTION_DIM)]
        next_state, reward, done, info = task.step(action)
        actions.append(one_hot_action)
        rewards.append(reward)
        final_state = next_state
        state = next_state
        if done:
            is_done = True
            state = task.reset()
            break
    if not is_done:
        # Episode still running: bootstrap the return with the critic.
        final_r = value_network(Variable(torch.Tensor([final_state]))).data.numpy()
    return states, actions, rewards, final_r, state
def discount_reward(r, gamma, final_r):
    """Compute discounted returns for a reward sequence, back to front.

    Args:
        r: sequence of per-step rewards.
        gamma: discount factor.
        final_r: bootstrap value appended after the last reward.

    Returns:
        np.ndarray of float64 where out[t] = r[t] + gamma*r[t+1] + ... + gamma^k*final_r.
    """
    # Use an explicit float64 buffer: np.zeros_like(r) would inherit an
    # integer dtype from integer rewards and silently truncate the returns.
    discounted_r = np.zeros(len(r), dtype=np.float64)
    running_add = final_r
    for t in reversed(range(len(r))):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
def main():
    """Train an advantage actor-critic agent on CartPole-v0.

    Alternates between collecting short rollouts with roll_out() and taking
    one gradient step each on the actor (policy-gradient loss weighted by
    the advantage) and the critic (MSE towards the discounted returns).
    Every 50 training steps the greedy policy is evaluated for 10 episodes
    and the mean return is printed.
    """
    # init a task generator for data fetching
    task = gym.make('CartPole-v0')
    init_state = task.reset()
    # init value network
    value_network = ValueNetwork(
        input_size=STATE_DIM, hidden_size=40, output_size=1)
    value_network_optim = torch.optim.Adam(
        value_network.parameters(), lr=0.01)
    # init actor network
    actor_network = ActorNetwork(
        input_size=STATE_DIM, hidden_size=40, action_size=ACTION_DIM)
    actor_network_optim = torch.optim.Adam(
        actor_network.parameters(), lr=0.01)
    # NOTE(review): task_episodes is created but never appended to below.
    steps = []
    task_episodes = []
    test_results = []
    for step in range(STEP):
        # NOTE(review): rendering every training step slows training
        # considerably; confirm it is intentional.
        task.render()
        states, actions, rewards, final_r, current_state = roll_out(
            actor_network, task, SAMPLE_NUMS, value_network, init_state)
        # Continue the next rollout from where the last one stopped.
        init_state = current_state
        actions_var = Variable(torch.Tensor(actions).view(-1, ACTION_DIM))
        states_var = Variable(torch.Tensor(states).view(-1, STATE_DIM))
        # train actor network
        actor_network_optim.zero_grad()
        log_softmax_actions = actor_network(states_var)
        # detach(): the critic's value acts only as a baseline here, so no
        # gradient from the actor loss flows into the value network.
        vs = value_network(states_var).detach()
        # calculate qs: discounted returns bootstrapped with final_r
        qs = Variable(torch.Tensor(discount_reward(rewards, 0.99, final_r))).view(-1, 1)
        advantages = qs - vs
        # REINFORCE-style loss: log-prob of the taken action (selected via
        # the one-hot action matrix) weighted by the advantage.
        actor_network_loss = -torch.mean(
            torch.sum(log_softmax_actions * actions_var, 1) * advantages)
        actor_network_loss.backward()
        torch.nn.utils.clip_grad_norm_(actor_network.parameters(), 0.5)
        actor_network_optim.step()
        # train value network: regress V(s) onto the discounted returns
        value_network_optim.zero_grad()
        target_values = qs
        values = value_network(states_var)
        criterion = nn.MSELoss()
        value_network_loss = criterion(values, target_values)
        value_network_loss.backward()
        torch.nn.utils.clip_grad_norm_(value_network.parameters(), 0.5)
        value_network_optim.step()
        # Testing: every 50 steps, run 10 greedy (argmax) evaluation episodes
        if (step + 1) % 50 == 0:
            result = 0
            test_task = gym.make('CartPole-v0')
            for test_epi in range(10):
                state = test_task.reset()
                for test_step in range(200):
                    softmax_action = torch.exp(actor_network(Variable(torch.Tensor([state]))))
                    # print(softmax_action.data)
                    action = np.argmax(softmax_action.data.numpy()[0])
                    next_state, reward, done, _ = test_task.step(action)
                    result += reward
                    state = next_state
                    if done:
                        break
            print("step:", step + 1, "test result:", result / 10.0)
            steps.append(step + 1)
            test_results.append(result / 10)
# Script entry point: train the actor-critic agent on CartPole-v0.
if __name__ == '__main__':
    main()
| [
"torch.nn.MSELoss",
"numpy.zeros_like",
"gym.make",
"torch.exp",
"torch.Tensor",
"torch.nn.Linear",
"torch.sum"
] | [((2252, 2268), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (2265, 2268), True, 'import numpy as np\n'), ((2517, 2540), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (2525, 2540), False, 'import gym\n'), ((439, 473), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (448, 473), True, 'import torch.nn as nn\n'), ((493, 528), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (502, 528), True, 'import torch.nn as nn\n'), ((548, 583), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'action_size'], {}), '(hidden_size, action_size)\n', (557, 583), True, 'import torch.nn as nn\n'), ((910, 944), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (919, 944), True, 'import torch.nn as nn\n'), ((964, 999), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (973, 999), True, 'import torch.nn as nn\n'), ((1019, 1054), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1028, 1054), True, 'import torch.nn as nn\n'), ((1568, 1597), 'torch.exp', 'torch.exp', (['log_softmax_action'], {}), '(log_softmax_action)\n', (1577, 1597), False, 'import torch\n'), ((4178, 4190), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4188, 4190), True, 'import torch.nn as nn\n'), ((4498, 4521), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (4506, 4521), False, 'import gym\n'), ((1519, 1540), 'torch.Tensor', 'torch.Tensor', (['[state]'], {}), '([state])\n', (1531, 1540), False, 'import torch\n'), ((3322, 3343), 'torch.Tensor', 'torch.Tensor', (['actions'], {}), '(actions)\n', (3334, 3343), False, 'import torch\n'), ((3396, 3416), 'torch.Tensor', 'torch.Tensor', (['states'], {}), '(states)\n', (3408, 3416), False, 'import torch\n'), ((3809, 3856), 'torch.sum', 'torch.sum', 
(['(log_softmax_actions * actions_var)', '(1)'], {}), '(log_softmax_actions * actions_var, 1)\n', (3818, 3856), False, 'import torch\n'), ((2095, 2122), 'torch.Tensor', 'torch.Tensor', (['[final_state]'], {}), '([final_state])\n', (2107, 2122), False, 'import torch\n'), ((4718, 4739), 'torch.Tensor', 'torch.Tensor', (['[state]'], {}), '([state])\n', (4730, 4739), False, 'import torch\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""file for evaling"""
import argparse
import numpy as np
from skimage.color import rgb2ycbcr
from skimage.metrics import peak_signal_noise_ratio
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore import context
import mindspore.ops as ops
from src.model.generator import Generator
from src.dataset.testdataset import create_testdataset
# Fix the global RNG seed so evaluation is reproducible.
set_seed(1)

# Command-line options for SRGAN evaluation.
parser = argparse.ArgumentParser(description="SRGAN eval")
parser.add_argument("--test_LR_path", type=str, default='/data/Set14/LR')
parser.add_argument("--test_GT_path", type=str, default='/data/Set14/HR')
parser.add_argument("--res_num", type=int, default=16)
parser.add_argument("--scale", type=int, default=4)
parser.add_argument("--generator_path", type=str, default='./ckpt/best.ckpt')
parser.add_argument("--mode", type=str, default='train')
parser.add_argument("--device_id", type=int, default=0, help="device id, default: 0.")
# Removed dead assignment `i = 0`: the variable was never read anywhere below.
# Script entry point: evaluate a trained SRGAN generator on a paired
# LR/HR test set and report the average PSNR on the luma (Y) channel.
if __name__ == '__main__':
    args = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_id=args.device_id, save_graphs=False)
    # Paired low-resolution / ground-truth dataset with batch size 1.
    test_ds = create_testdataset(1, args.test_LR_path, args.test_GT_path)
    test_data_loader = test_ds.create_dict_iterator()
    # Generator constructed with 4 -- presumably the x4 upscale factor
    # (matches the --scale default); restore the trained weights.
    generator = Generator(4)
    params = load_checkpoint(args.generator_path)
    load_param_into_net(generator, params)
    # ReduceSum over axis 0: with batch size 1 this just squeezes away the
    # batch dimension.
    op = ops.ReduceSum(keep_dims=False)
    psnr_list = []
    print("=======starting test=====")
    for data in test_data_loader:
        lr = data['LR']
        gt = data['HR']
        bs, c, h, w = lr.shape[:4]
        # Crop the ground truth so it matches the upscaled LR resolution.
        gt = gt[:, :, : h * args.scale, : w *args.scale]
        output = generator(lr)
        output = op(output, 0)
        output = output.asnumpy()
        output = np.clip(output, -1.0, 1.0)
        gt = op(gt, 0)
        # Map the network's [-1, 1] range to image range [0, 1].
        output = (output + 1.0) / 2.0
        gt = (gt + 1.0) / 2.0
        # CHW -> HWC layout for skimage.
        output = output.transpose(1, 2, 0)
        gt = gt.asnumpy()
        gt = gt.transpose(1, 2, 0)
        # PSNR on the Y channel only, excluding a `scale`-pixel border
        # (standard super-resolution evaluation protocol).
        y_output = rgb2ycbcr(output)[args.scale:-args.scale, args.scale:-args.scale, :1]
        y_gt = rgb2ycbcr(gt)[args.scale:-args.scale, args.scale:-args.scale, :1]
        psnr = peak_signal_noise_ratio(y_output / 255.0, y_gt / 255.0, data_range=1.0)
        psnr_list.append(psnr)
    print("avg PSNR:", np.mean(psnr_list))
| [
"mindspore.context.set_context",
"argparse.ArgumentParser",
"src.dataset.testdataset.create_testdataset",
"mindspore.ops.ReduceSum",
"numpy.clip",
"mindspore.common.set_seed",
"skimage.color.rgb2ycbcr",
"numpy.mean",
"mindspore.train.serialization.load_checkpoint",
"skimage.metrics.peak_signal_noi... | [((1088, 1099), 'mindspore.common.set_seed', 'set_seed', (['(1)'], {}), '(1)\n', (1096, 1099), False, 'from mindspore.common import set_seed\n'), ((1109, 1158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SRGAN eval"""'}), "(description='SRGAN eval')\n", (1132, 1158), False, 'import argparse\n'), ((1704, 1797), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_id': 'args.device_id', 'save_graphs': '(False)'}), '(mode=context.GRAPH_MODE, device_id=args.device_id,\n save_graphs=False)\n', (1723, 1797), False, 'from mindspore import context\n'), ((1808, 1867), 'src.dataset.testdataset.create_testdataset', 'create_testdataset', (['(1)', 'args.test_LR_path', 'args.test_GT_path'], {}), '(1, args.test_LR_path, args.test_GT_path)\n', (1826, 1867), False, 'from src.dataset.testdataset import create_testdataset\n'), ((1938, 1950), 'src.model.generator.Generator', 'Generator', (['(4)'], {}), '(4)\n', (1947, 1950), False, 'from src.model.generator import Generator\n'), ((1964, 2000), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['args.generator_path'], {}), '(args.generator_path)\n', (1979, 2000), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((2005, 2043), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['generator', 'params'], {}), '(generator, params)\n', (2024, 2043), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((2053, 2083), 'mindspore.ops.ReduceSum', 'ops.ReduceSum', ([], {'keep_dims': '(False)'}), '(keep_dims=False)\n', (2066, 2083), True, 'import mindspore.ops as ops\n'), ((2432, 2458), 'numpy.clip', 'np.clip', (['output', '(-1.0)', '(1.0)'], {}), '(output, -1.0, 1.0)\n', (2439, 2458), True, 'import numpy as np\n'), ((2843, 2914), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', 
(['(y_output / 255.0)', '(y_gt / 255.0)'], {'data_range': '(1.0)'}), '(y_output / 255.0, y_gt / 255.0, data_range=1.0)\n', (2866, 2914), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((2969, 2987), 'numpy.mean', 'np.mean', (['psnr_list'], {}), '(psnr_list)\n', (2976, 2987), True, 'import numpy as np\n'), ((2676, 2693), 'skimage.color.rgb2ycbcr', 'rgb2ycbcr', (['output'], {}), '(output)\n', (2685, 2693), False, 'from skimage.color import rgb2ycbcr\n'), ((2761, 2774), 'skimage.color.rgb2ycbcr', 'rgb2ycbcr', (['gt'], {}), '(gt)\n', (2770, 2774), False, 'from skimage.color import rgb2ycbcr\n')] |
import numpy as np
import dolfin
import os
class Domain():
    """
    GENERAL DOMAIN HANDLING

    * Reads the mesh file (.msh, .xml or .xdmf); .msh files are first
      converted to xml format (conversion cannot be parallelized).
    * Extracts the geometric properties needed for periodic boundary
      conditions: bounding box, total volume, per-element volumes and
      the set of material phases.
    """

    def __init__(self, filename):
        """Load the mesh in `filename` and compute its geometric data.

        Args:
            filename: path to a .msh, .xml or .xdmf mesh file.
        """
        ################################
        # Filename handling
        ################################
        self.fname = filename
        self.name, self.ext = os.path.splitext(filename)
        ################################
        # Read domain file depending on the extension
        ################################
        if self.ext == ".msh":
            self._dolfin_convert(self.fname)
            self.__read_xml()
        elif self.ext == ".xml":
            self.__read_xml()
        elif self.ext == ".xdmf":
            #xdmf_extract(self.fname)
            self.__read_xdmf()
        self.dim = self.mesh.geometry().dim()   # Dimension of the domain
        self.ele_num = self.mesh.num_cells()    # Number of elements in the domain
        self.phases = np.unique(self.subdomains.array()).astype(int)  # Phases present in the domain
        #self.verticies, self.vol = self.__get_vertices()
        self.bounds, self.vol = self.__get_bounds()  # Bounding box and total volume
        self.__get_volume()  # Volume of every element

    def _dolfin_convert(self, filename):
        """
        Convert the .msh file with msh2 format to xml using dolfin-convert.
        *** Legacy format, try not to use it!
        """
        name, _ = os.path.splitext(filename)
        os.system('dolfin-convert '+str(filename)+' '+str(name)+'.xml')

    def __read_xml(self):
        """Read mesh, subdomain and facet data from legacy .xml files.

        Note: legacy extension, try not to use this method. Just here for
        some tests!
        """
        self.mesh = dolfin.Mesh(self.name+".xml")
        self.subdomains = dolfin.MeshFunction("size_t", self.mesh, self.name+"_physical_region.xml")
        self.facets = dolfin.MeshFunction("size_t", self.mesh, self.name+"_facet_region.xml")

    def __read_xdmf(self):
        """Read mesh, subdomain and facet data from .xdmf files.

        To do: name_to_read -> more specific names like subdomain and stuff!
        """
        ################################
        # Read main domain file and put it to self.mesh
        ################################
        self.mesh = dolfin.Mesh()
        with dolfin.XDMFFile(self.name+".xdmf") as infile:
            infile.read(self.mesh)
        ################################
        # Read physical region file and put it to self.subdomains
        ################################
        mvc = dolfin.MeshValueCollection("size_t", self.mesh, 3)
        with dolfin.XDMFFile(self.name+"_physical_region.xdmf") as infile:
            infile.read(mvc, "name_to_read")
        self.subdomains = dolfin.MeshFunction('size_t', self.mesh, mvc)
        ################################
        # Read facet region file and put it to self.facets
        ################################
        mfc = dolfin.MeshValueCollection("size_t", self.mesh, 3)
        with dolfin.XDMFFile(self.name+"_facet_region.xdmf") as infile:
            infile.read(mfc, "name_to_read")
        self.facets = dolfin.MeshFunction("size_t", self.mesh, mfc)

    def __get_vertices(self):
        """
        Note: Not using it any more!
        (x_min,y_max) #-----# (x_max,y_max)
                      |     |
                      |     |
                      |     |
        (x_min,y_min) #-----# (x_max,y_min)
        """
        if self.dim == 2:
            x_min = np.min(self.mesh.coordinates()[:,0])
            x_max = np.max(self.mesh.coordinates()[:,0])
            y_min = np.min(self.mesh.coordinates()[:,1])
            y_max = np.max(self.mesh.coordinates()[:,1])
            vert = np.array([[x_min,y_min], [x_max,y_min], \
                            [x_max,y_max], [x_min,y_max]])
            # NOTE(review): this volume assumes the domain starts at the
            # origin -- confirm for shifted meshes.
            vol = x_max * y_max
        elif self.dim == 3:
            # Fix: the original `raise ("...")` raised a plain string, which
            # is itself a TypeError in Python 3; use a proper exception type.
            raise NotImplementedError("Not implemented yet!")
        else:
            raise ValueError("Unsupported mesh dimension: %d" % self.dim)
        return vert, vol

    def __get_bounds(self):
        """
        Method: Get the bounds of your domain
                      (x_max,y_max,z_max)
              #-----------#
             /           /|
            /           / |
           #----------#   |
           |          |   |
           |          #---#
           |         /|  /
           |        / | /
           |/         |/
           #----------#
        (x_min,y_min,z_min)
        """
        ################################
        # Bounds for 2D domains
        ################################
        if self.dim == 2:
            x_min = np.min(self.mesh.coordinates()[:,0])
            x_max = np.max(self.mesh.coordinates()[:,0])
            y_min = np.min(self.mesh.coordinates()[:,1])
            y_max = np.max(self.mesh.coordinates()[:,1])
            # NOTE(review): volume assumes the min corner sits at the
            # origin -- confirm for shifted meshes.
            vol = x_max * y_max
            bounds = np.array([[x_min, y_min],[x_max, y_max]])
        ################################
        # Bounds for 3D domains
        ################################
        elif self.dim == 3:
            x_min = np.min(self.mesh.coordinates()[:,0])
            x_max = np.max(self.mesh.coordinates()[:,0])
            y_min = np.min(self.mesh.coordinates()[:,1])
            y_max = np.max(self.mesh.coordinates()[:,1])
            z_min = np.min(self.mesh.coordinates()[:,2])
            z_max = np.max(self.mesh.coordinates()[:,2])
            vol = x_max * y_max * z_max
            bounds = np.array([[x_min, y_min, z_min],[x_max, y_max, z_max]])
        else:
            # Fix: previously any other dimension fell through and raised an
            # opaque UnboundLocalError on `bounds`.
            raise ValueError("Unsupported mesh dimension: %d" % self.dim)
        return bounds, vol

    def __get_volume(self):
        """
        Method: Get volume/area of all the elements in a numpy array
        """
        self.ele_vol = np.zeros(self.ele_num)
        for i in range(self.ele_num):
            cell = dolfin.Cell(self.mesh, i)
            self.ele_vol[i] = cell.volume()
| [
"dolfin.Cell",
"dolfin.MeshValueCollection",
"dolfin.MeshFunction",
"numpy.zeros",
"dolfin.XDMFFile",
"dolfin.Mesh",
"numpy.array",
"os.path.splitext"
] | [((573, 599), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (589, 599), False, 'import os\n'), ((1759, 1785), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1775, 1785), False, 'import os\n'), ((2025, 2056), 'dolfin.Mesh', 'dolfin.Mesh', (["(self.name + '.xml')"], {}), "(self.name + '.xml')\n", (2036, 2056), False, 'import dolfin\n'), ((2081, 2157), 'dolfin.MeshFunction', 'dolfin.MeshFunction', (['"""size_t"""', 'self.mesh', "(self.name + '_physical_region.xml')"], {}), "('size_t', self.mesh, self.name + '_physical_region.xml')\n", (2100, 2157), False, 'import dolfin\n'), ((2178, 2251), 'dolfin.MeshFunction', 'dolfin.MeshFunction', (['"""size_t"""', 'self.mesh', "(self.name + '_facet_region.xml')"], {}), "('size_t', self.mesh, self.name + '_facet_region.xml')\n", (2197, 2251), False, 'import dolfin\n'), ((2542, 2555), 'dolfin.Mesh', 'dolfin.Mesh', ([], {}), '()\n', (2553, 2555), False, 'import dolfin\n'), ((2813, 2863), 'dolfin.MeshValueCollection', 'dolfin.MeshValueCollection', (['"""size_t"""', 'self.mesh', '(3)'], {}), "('size_t', self.mesh, 3)\n", (2839, 2863), False, 'import dolfin\n'), ((3011, 3056), 'dolfin.MeshFunction', 'dolfin.MeshFunction', (['"""size_t"""', 'self.mesh', 'mvc'], {}), "('size_t', self.mesh, mvc)\n", (3030, 3056), False, 'import dolfin\n'), ((3212, 3262), 'dolfin.MeshValueCollection', 'dolfin.MeshValueCollection', (['"""size_t"""', 'self.mesh', '(3)'], {}), "('size_t', self.mesh, 3)\n", (3238, 3262), False, 'import dolfin\n'), ((3403, 3448), 'dolfin.MeshFunction', 'dolfin.MeshFunction', (['"""size_t"""', 'self.mesh', 'mfc'], {}), "('size_t', self.mesh, mfc)\n", (3422, 3448), False, 'import dolfin\n'), ((6256, 6278), 'numpy.zeros', 'np.zeros', (['self.ele_num'], {}), '(self.ele_num)\n', (6264, 6278), True, 'import numpy as np\n'), ((2569, 2605), 'dolfin.XDMFFile', 'dolfin.XDMFFile', (["(self.name + '.xdmf')"], {}), "(self.name + '.xdmf')\n", (2584, 2605), False, 'import 
dolfin\n'), ((2878, 2930), 'dolfin.XDMFFile', 'dolfin.XDMFFile', (["(self.name + '_physical_region.xdmf')"], {}), "(self.name + '_physical_region.xdmf')\n", (2893, 2930), False, 'import dolfin\n'), ((3277, 3326), 'dolfin.XDMFFile', 'dolfin.XDMFFile', (["(self.name + '_facet_region.xdmf')"], {}), "(self.name + '_facet_region.xdmf')\n", (3292, 3326), False, 'import dolfin\n'), ((4067, 4141), 'numpy.array', 'np.array', (['[[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]]'], {}), '([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]])\n', (4075, 4141), True, 'import numpy as np\n'), ((5385, 5427), 'numpy.array', 'np.array', (['[[x_min, y_min], [x_max, y_max]]'], {}), '([[x_min, y_min], [x_max, y_max]])\n', (5393, 5427), True, 'import numpy as np\n'), ((6336, 6361), 'dolfin.Cell', 'dolfin.Cell', (['self.mesh', 'i'], {}), '(self.mesh, i)\n', (6347, 6361), False, 'import dolfin\n'), ((6012, 6068), 'numpy.array', 'np.array', (['[[x_min, y_min, z_min], [x_max, y_max, z_max]]'], {}), '([[x_min, y_min, z_min], [x_max, y_max, z_max]])\n', (6020, 6068), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
import tensorflow as tf
import six
from timeit import default_timer as timer
class LSTM_Var_Autoencoder(object):
    """LSTM variational autoencoder (TensorFlow 1.x graph-mode).

    The whole computation graph is built at construction time: an LSTM
    encoder producing the mean/scale of a Gaussian latent code, a
    reparameterized sampling step, and a two-layer LSTM decoder that
    reconstructs the input sequence from the repeated latent vector.
    """

    def __init__(self, intermediate_dim=None, z_dim=None, n_dim=None, kulback_coef=0.1,
                 stateful=False):
        """
        Args:
            intermediate_dim : LSTM cells dimension.
            z_dim : dimension of latent space.
            n_dim : dimension of input data.
            statefull : if true, keep cell state through batches.
        """
        if not intermediate_dim or not z_dim or not n_dim:
            raise ValueError("You should set intermediate_dim, z_dim"
                             "(latent space) dimension and your input"
                             "third dimension, n_dim."
                             " \n ")
        tf.reset_default_graph()

        self.z_dim = z_dim
        self.n_dim = n_dim
        self.intermediate_dim = intermediate_dim
        self.stateful = stateful
        # Placeholder for (batch, time, n_dim) input sequences.
        self.input = tf.placeholder(tf.float32, shape=[None, None, self.n_dim])
        self.batch_size = tf.placeholder(tf.int64)
        self.kulback_coef = kulback_coef
        # tf.data api
        dataset = tf.data.Dataset.from_tensor_slices(self.input).repeat() \
            .batch(self.batch_size)
        self.batch_ = tf.placeholder(tf.int32, shape=[])
        self.ite = dataset.make_initializable_iterator()
        self.x = self.ite.get_next()
        # Number of timesteps the decoder should emit (fed at run time).
        self.repeat = tf.placeholder(tf.int32)

        def gauss_sampling(mean, sigma):
            # Reparameterization trick: z = mean + eps * exp(0.5 * sigma).
            with tf.name_scope("sample_gaussian"):
                eps = tf.random_normal(tf.shape(sigma), 0, 1, dtype=tf.float32)
                # It should be log(sigma / 2), but this empirically converges"
                # much better for an unknown reason"
                z = tf.add(mean, tf.exp(0.5*sigma) * eps)
            return z

        # (with few modifications) from https://stackoverflow.com/questions
        def get_state_variables(batch_size, cell):
            # For each layer, get the initial state and make a variable out of it
            # to enable updating its value.
            # NOTE(review): the LSTMStateTuple below holds the raw zero-state
            # *tensors*; they are not tf.Variable objects, so there is nothing
            # persistent to write back between batches.  Upstream stateful-LSTM
            # recipes wrap these in tf.Variable(..., trainable=False) -- confirm.
            state_variables = []
            for state_c, state_h in cell.zero_state(batch_size, tf.float32):
                state_variables.append(tf.nn.rnn_cell.LSTMStateTuple(
                    (state_c), (state_h)))
            # Return as a tuple, so that it can be fed to dynamic_rnn as an initial
            # state
            return tuple(state_variables)

        # Add an operation to update the train states with the last state
        # tensors
        def get_state_update_op(state_variables, new_states):
            update_ops = []
            for state_variable, new_state in zip(state_variables, new_states):
                # NOTE(review): `==` builds element-wise *equality* tensors; it
                # does not assign new_state into state_variable, so running
                # these ops cannot actually carry state across batches.
                # tf.assign on real variables was probably intended -- confirm.
                update_ops.extend([state_variable[0] == new_state[0],
                                   state_variable[1] == new_state[1]])
            return tf.tuple(update_ops)

        # Return an operation to set each variable in a list of LSTMStateTuples
        # to zero
        def get_state_reset_op(state_variables, cell, batch_size):
            zero_states = cell.zero_state(batch_size, tf.float32)
            return get_state_update_op(state_variables, zero_states)

        # Projection from the encoder's last hidden state to the latent
        # mean and (softplus-activated) scale parameters.
        weights = {
            'z_mean': tf.get_variable(
                "z_mean",
                shape=[
                    self.intermediate_dim,
                    self.z_dim],
                initializer=tf.contrib.layers.xavier_initializer()),
            'log_sigma': tf.get_variable(
                "log_sigma",
                shape=[
                    self.intermediate_dim,
                    self.z_dim],
                initializer=tf.contrib.layers.xavier_initializer())}
        biases = {
            'z_mean_b': tf.get_variable("b_mean", shape=[self.z_dim],
                                        initializer=tf.zeros_initializer()),
            'z_std_b': tf.get_variable("b_log_sigma", shape=[self.z_dim],
                                       initializer=tf.zeros_initializer())
        }
        with tf.variable_scope("encoder"):
            with tf.variable_scope("LSTM_encoder"):
                lstm_layer = tf.nn.rnn_cell.LSTMCell(
                    self.intermediate_dim,
                    forget_bias=1,
                    initializer=tf.contrib.layers.xavier_initializer(),
                    activation=tf.nn.relu)
            if self.stateful:
                # NOTE(review): this re-creates self.batch_, orphaning the
                # placeholder created above in __init__.
                self.batch_ = tf.placeholder(tf.int32, shape=[])
                # throws an error without MultiRNNCell
                layer = tf.nn.rnn_cell.MultiRNNCell([lstm_layer])
                states = get_state_variables(self.batch_, layer)
                outputs, new_states = tf.nn.dynamic_rnn(
                    layer, self.x, initial_state=states, dtype=tf.float32)
                self.update_op = get_state_update_op(states, new_states)
                self.reset_state_op = get_state_reset_op(
                    states, lstm_layer, self.batch_)
            else:
                outputs, _ = tf.nn.dynamic_rnn(lstm_layer, self.x, dtype="float32")
        # For each layer, get the initial state. states will be a tuple of
        # LSTMStateTuples.
        # Latent parameters are computed from the encoder output at the
        # last timestep only.
        self.z_mean = tf.add(tf.matmul(
            outputs[:, -1, :], weights['z_mean']), biases['z_mean_b'])
        self.z_sigma = tf.nn.softplus(tf.add(tf.matmul(
            outputs[:, -1, :], weights['log_sigma']), biases['z_std_b']))
        self.z = gauss_sampling(self.z_mean, self.z_sigma)
        # from [batch_size,z_dim] to [batch_size, TIMESTEPS, z_dim]
        repeated_z = tf.keras.layers.RepeatVector(
            self.repeat, dtype="float32")(self.z)

        with tf.variable_scope("decoder"):
            if self.stateful:
                with tf.variable_scope('lstm_decoder_stateful'):
                    rnn_layers_ = [
                        tf.nn.rnn_cell.LSTMCell(
                            size,
                            initializer=tf.contrib.layers.xavier_initializer(),
                            forget_bias=1) for size in [
                            self.intermediate_dim,
                            n_dim]]
                    multi_rnn_cell_ = tf.nn.rnn_cell.MultiRNNCell(rnn_layers_)
                    states_ = get_state_variables(self.batch_, multi_rnn_cell_)
                    self.x_reconstr_mean, new_states_ = tf.nn.dynamic_rnn(
                        cell=multi_rnn_cell_, inputs=repeated_z, initial_state=states_, dtype=tf.float32)
                    self.update_op_ = get_state_update_op(states_, new_states_)
                    self.reset_state_op_ = get_state_reset_op(
                        states_, multi_rnn_cell_, self.batch_)
            else:
                with tf.variable_scope('lstm_decoder_stateless'):
                    rnn_layers = [
                        tf.nn.rnn_cell.LSTMCell(
                            size,
                            initializer=tf.contrib.layers.xavier_initializer(),
                            forget_bias=1) for size in [
                            self.intermediate_dim,
                            n_dim]]
                    multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
                    self.x_reconstr_mean, _ = tf.nn.dynamic_rnn(
                        cell=multi_rnn_cell, inputs=repeated_z, dtype=tf.float32)

    def _create_loss_optimizer(self, opt, **param):
        """Build self._cost (MSE + weighted KL term) and self.train_op.

        `opt` must be an optimizer *constructor*; it is called with
        `param` and its apply_gradients is used on globally-clipped grads.
        """
        # Reconstruction term: MSE between input and decoder output.
        with tf.name_scope("MSE"):
            reconstr_loss = tf.reduce_sum(
                tf.losses.mean_squared_error(
                    self.x, self.x_reconstr_mean))
        # KL(q(z|x) || N(0, I)) regularizer, weighted by kulback_coef.
        with tf.name_scope("KL_divergence"):
            latent_loss = - 0.5 * tf.reduce_sum(1 + self.z_sigma
                                                - self.z_mean**2
                                                - tf.exp(self.z_sigma), 1)
            self._cost = tf.reduce_mean(reconstr_loss + self.kulback_coef*latent_loss)
        # apply gradient clipping (global norm hard-coded to 10)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), 10)
        self.train_op = opt(**param).apply_gradients(zip(grads, tvars))

    def fit(
            self,
            X,
            learning_rate=0.001,
            batch_size=100,
            num_epochs=200,
            opt=tf.optimizers.Adam(),
            REG_LAMBDA=0,
            grad_clip_norm=10,
            optimizer_params=None,
            verbose=True):
        """Build the optimizer and train on X.

        Args:
            X: 3-D array (samples, timesteps, n_dim).
            learning_rate: used when optimizer_params is None.
            batch_size / num_epochs: training schedule.
            opt: optimizer; _create_loss_optimizer calls `opt(**params)`,
                i.e. it expects a *class*.  NOTE(review): the default here
                is an Adam *instance*, which is not callable that way --
                confirm the intended default.
            REG_LAMBDA: L2 penalty weight on the encoder variables.
            grad_clip_norm: NOTE(review): unused -- the clip norm is
                hard-coded to 10 in _create_loss_optimizer.
            optimizer_params: explicit kwargs for the optimizer.
            verbose: NOTE(review): `&` binds tighter than `==` in the
                progress check below, so this flag is effectively ignored.
        """
        if len(np.shape(X)) != 3:
            raise ValueError(
                'Input must be a 3-D array. I could reshape it for you, but I am too lazy.'
                ' \n Use input.reshape(-1,timesteps,1).')
        if optimizer_params is None:
            optimizer_params = {}
            optimizer_params['learning_rate'] = learning_rate
        else:
            optimizer_params = dict(six.iteritems(optimizer_params))
        self._create_loss_optimizer(opt, **optimizer_params)
        lstm_var = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES,
            scope='LSTM_encoder')
        # Optional L2 regularization of the encoder LSTM weights.
        self._cost += REG_LAMBDA * tf.reduce_mean(tf.nn.l2_loss(lstm_var))
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        init = tf.global_variables_initializer()
        self.sess.run(init)
        self.sess.run(
            self.ite.initializer,
            feed_dict={
                self.input: X,
                self.batch_size: batch_size})
        batches_per_epoch = int(np.ceil(len(X) / batch_size))
        print("\n")
        print("Training...")
        print("\n")
        start = timer()
        for epoch in range(num_epochs):
            train_error = 0
            for step in range(batches_per_epoch):
                if self.stateful:
                    # `s` (the state-update result) is fetched but unused.
                    loss, _, s, _ = self.sess.run([self._cost, self.train_op, self.update_op, self.update_op_],
                                                  feed_dict={self.repeat: np.shape(X)[1], self.batch_: batch_size})
                else:
                    loss, _ = self.sess.run([self._cost, self.train_op], feed_dict={
                        self.repeat: np.shape(X)[1]})
                train_error += loss
                if step == (batches_per_epoch - 1):
                    mean_loss = train_error / batches_per_epoch
            if self.stateful:  # reset cell & hidden states between epochs
                self.sess.run([self.reset_state_op],
                              feed_dict={self.batch_: batch_size})
                self.sess.run([self.reset_state_op_],
                              feed_dict={self.batch_: batch_size})
            # NOTE(review): due to precedence this is `epoch % 10 == (0 & verbose)`,
            # i.e. `epoch % 10 == 0`; `and` was probably intended -- confirm.
            if epoch % 10 == 0 & verbose:
                print(
                    "Epoch {:^6} Loss {:0.5f}" .format(
                        epoch + 1, mean_loss))
        end = timer()
        print("\n")
        print("Training time {:0.2f} minutes".format((end - start) / (60)))

    def reconstruct(self, X, get_error=False):
        """Run X through encoder+decoder; optionally also return the
        element-wise squared reconstruction error."""
        self.sess.run(
            self.ite.initializer,
            feed_dict={
                self.input: X,
                self.batch_size: np.shape(X)[0]})
        if self.stateful:
            # Reset recurrent state, then run one full-batch pass.
            _, _ = self.sess.run([self.reset_state_op, self.reset_state_op_], feed_dict={
                self.batch_: np.shape(X)[0]})
            x_rec, _, _ = self.sess.run([self.x_reconstr_mean, self.update_op, self.update_op_], feed_dict={
                self.batch_: np.shape(X)[0], self.repeat: np.shape(X)[1]})
        else:
            x_rec = self.sess.run(self.x_reconstr_mean,
                                  feed_dict={self.repeat: np.shape(X)[1]})
        if get_error:
            squared_error = (x_rec - X)**2
            return x_rec, squared_error
        else:
            return x_rec

    def reduce(self, X):
        """Encode X and return the sampled latent codes z."""
        self.sess.run(
            self.ite.initializer,
            feed_dict={
                self.input: X,
                self.batch_size: np.shape(X)[0]})
        if self.stateful:
            _ = self.sess.run([self.reset_state_op], feed_dict={
                self.batch_: np.shape(X)[0]})
            x, _ = self.sess.run([self.z, self.update_op], feed_dict={
                self.batch_: np.shape(X)[0], self.repeat: np.shape(X)[1]})
        else:
            x = self.sess.run(self.z)
        return x
| [
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"numpy.shape",
"tensorflow.ConfigProto",
"tensorflow.matmul",
"six.iteritems",
"tensorflow.variable_scope",
"t... | [((874, 898), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (896, 898), True, 'import tensorflow as tf\n'), ((1063, 1121), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, self.n_dim]'}), '(tf.float32, shape=[None, None, self.n_dim])\n', (1077, 1121), True, 'import tensorflow as tf\n'), ((1149, 1173), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {}), '(tf.int64)\n', (1163, 1173), True, 'import tensorflow as tf\n'), ((1376, 1410), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[]'}), '(tf.int32, shape=[])\n', (1390, 1410), True, 'import tensorflow as tf\n'), ((1530, 1554), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (1544, 1554), True, 'import tensorflow as tf\n'), ((8066, 8090), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (8088, 8090), True, 'import tensorflow as tf\n'), ((8404, 8424), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (8422, 8424), True, 'import tensorflow as tf\n'), ((9099, 9172), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""LSTM_encoder"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='LSTM_encoder')\n", (9116, 9172), True, 'import tensorflow as tf\n'), ((9296, 9337), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (9310, 9337), True, 'import tensorflow as tf\n'), ((9409, 9434), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9419, 9434), True, 'import tensorflow as tf\n'), ((9451, 9484), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9482, 9484), True, 'import tensorflow as tf\n'), ((9831, 9838), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9836, 9838), True, 'from timeit import default_timer as timer\n'), ((11089, 11096), 
'timeit.default_timer', 'timer', ([], {}), '()\n', (11094, 11096), True, 'from timeit import default_timer as timer\n'), ((2991, 3011), 'tensorflow.tuple', 'tf.tuple', (['update_ops'], {}), '(update_ops)\n', (2999, 3011), True, 'import tensorflow as tf\n'), ((4175, 4203), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (4192, 4203), True, 'import tensorflow as tf\n'), ((4566, 4600), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[]'}), '(tf.int32, shape=[])\n', (4580, 4600), True, 'import tensorflow as tf\n'), ((4674, 4715), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['[lstm_layer]'], {}), '([lstm_layer])\n', (4701, 4715), True, 'import tensorflow as tf\n'), ((4813, 4885), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['layer', 'self.x'], {'initial_state': 'states', 'dtype': 'tf.float32'}), '(layer, self.x, initial_state=states, dtype=tf.float32)\n', (4830, 4885), True, 'import tensorflow as tf\n'), ((5120, 5174), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['lstm_layer', 'self.x'], {'dtype': '"""float32"""'}), "(lstm_layer, self.x, dtype='float32')\n", (5137, 5174), True, 'import tensorflow as tf\n'), ((5311, 5358), 'tensorflow.matmul', 'tf.matmul', (['outputs[:, -1, :]', "weights['z_mean']"], {}), "(outputs[:, -1, :], weights['z_mean'])\n", (5320, 5358), True, 'import tensorflow as tf\n'), ((5679, 5737), 'tensorflow.keras.layers.RepeatVector', 'tf.keras.layers.RepeatVector', (['self.repeat'], {'dtype': '"""float32"""'}), "(self.repeat, dtype='float32')\n", (5707, 5737), True, 'import tensorflow as tf\n'), ((5776, 5804), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (5793, 5804), True, 'import tensorflow as tf\n'), ((7509, 7529), 'tensorflow.name_scope', 'tf.name_scope', (['"""MSE"""'], {}), "('MSE')\n", (7522, 7529), True, 'import tensorflow as tf\n'), ((7688, 7718), 'tensorflow.name_scope', 'tf.name_scope', 
(['"""KL_divergence"""'], {}), "('KL_divergence')\n", (7701, 7718), True, 'import tensorflow as tf\n'), ((7952, 8015), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(reconstr_loss + self.kulback_coef * latent_loss)'], {}), '(reconstr_loss + self.kulback_coef * latent_loss)\n', (7966, 8015), True, 'import tensorflow as tf\n'), ((8134, 8165), 'tensorflow.gradients', 'tf.gradients', (['self._cost', 'tvars'], {}), '(self._cost, tvars)\n', (8146, 8165), True, 'import tensorflow as tf\n'), ((1617, 1649), 'tensorflow.name_scope', 'tf.name_scope', (['"""sample_gaussian"""'], {}), "('sample_gaussian')\n", (1630, 1649), True, 'import tensorflow as tf\n'), ((4223, 4256), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""LSTM_encoder"""'], {}), "('LSTM_encoder')\n", (4240, 4256), True, 'import tensorflow as tf\n'), ((5440, 5490), 'tensorflow.matmul', 'tf.matmul', (['outputs[:, -1, :]', "weights['log_sigma']"], {}), "(outputs[:, -1, :], weights['log_sigma'])\n", (5449, 5490), True, 'import tensorflow as tf\n'), ((6467, 6571), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'multi_rnn_cell_', 'inputs': 'repeated_z', 'initial_state': 'states_', 'dtype': 'tf.float32'}), '(cell=multi_rnn_cell_, inputs=repeated_z, initial_state=\n states_, dtype=tf.float32)\n', (6484, 6571), True, 'import tensorflow as tf\n'), ((7342, 7417), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'multi_rnn_cell', 'inputs': 'repeated_z', 'dtype': 'tf.float32'}), '(cell=multi_rnn_cell, inputs=repeated_z, dtype=tf.float32)\n', (7359, 7417), True, 'import tensorflow as tf\n'), ((7592, 7650), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['self.x', 'self.x_reconstr_mean'], {}), '(self.x, self.x_reconstr_mean)\n', (7620, 7650), True, 'import tensorflow as tf\n'), ((8581, 8592), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (8589, 8592), True, 'import numpy as np\n'), ((8982, 9013), 'six.iteritems', 'six.iteritems', (['optimizer_params'], {}), 
'(optimizer_params)\n', (8995, 9013), False, 'import six\n'), ((9251, 9274), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['lstm_var'], {}), '(lstm_var)\n', (9264, 9274), True, 'import tensorflow as tf\n'), ((1691, 1706), 'tensorflow.shape', 'tf.shape', (['sigma'], {}), '(sigma)\n', (1699, 1706), True, 'import tensorflow as tf\n'), ((2352, 2399), 'tensorflow.nn.rnn_cell.LSTMStateTuple', 'tf.nn.rnn_cell.LSTMStateTuple', (['state_c', 'state_h'], {}), '(state_c, state_h)\n', (2381, 2399), True, 'import tensorflow as tf\n'), ((3541, 3579), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (3577, 3579), True, 'import tensorflow as tf\n'), ((3787, 3825), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (3823, 3825), True, 'import tensorflow as tf\n'), ((3972, 3994), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (3992, 3994), True, 'import tensorflow as tf\n'), ((4124, 4146), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4144, 4146), True, 'import tensorflow as tf\n'), ((5859, 5901), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm_decoder_stateful"""'], {}), "('lstm_decoder_stateful')\n", (5876, 5901), True, 'import tensorflow as tf\n'), ((6292, 6332), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['rnn_layers_'], {}), '(rnn_layers_)\n', (6319, 6332), True, 'import tensorflow as tf\n'), ((6827, 6870), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm_decoder_stateless"""'], {}), "('lstm_decoder_stateless')\n", (6844, 6870), True, 'import tensorflow as tf\n'), ((7259, 7298), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['rnn_layers'], {}), '(rnn_layers)\n', (7286, 7298), True, 'import tensorflow as tf\n'), ((1258, 1304), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['self.input'], {}), 
'(self.input)\n', (1292, 1304), True, 'import tensorflow as tf\n'), ((1892, 1911), 'tensorflow.exp', 'tf.exp', (['(0.5 * sigma)'], {}), '(0.5 * sigma)\n', (1898, 1911), True, 'import tensorflow as tf\n'), ((4426, 4464), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4462, 4464), True, 'import tensorflow as tf\n'), ((7901, 7921), 'tensorflow.exp', 'tf.exp', (['self.z_sigma'], {}), '(self.z_sigma)\n', (7907, 7921), True, 'import tensorflow as tf\n'), ((11395, 11406), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (11403, 11406), True, 'import numpy as np\n'), ((12279, 12290), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (12287, 12290), True, 'import numpy as np\n'), ((11577, 11588), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (11585, 11588), True, 'import numpy as np\n'), ((11758, 11769), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (11766, 11769), True, 'import numpy as np\n'), ((11787, 11798), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (11795, 11798), True, 'import numpy as np\n'), ((11935, 11946), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (11943, 11946), True, 'import numpy as np\n'), ((12433, 12444), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (12441, 12444), True, 'import numpy as np\n'), ((12569, 12580), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (12577, 12580), True, 'import numpy as np\n'), ((12598, 12609), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (12606, 12609), True, 'import numpy as np\n'), ((6066, 6104), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (6102, 6104), True, 'import tensorflow as tf\n'), ((7034, 7072), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (7070, 7072), True, 'import tensorflow as tf\n'), ((10185, 10196), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (10193, 10196), True, 'import numpy as np\n'), ((10394, 
10405), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (10402, 10405), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import io
import numpy as np
import scipy.io.wavfile
import scipy.misc
import tensorflow as tf
class Logger(object):
  """Logging utility.

  Makes some things easier than using raw TF summary writers:
    - explicit control of when logs are written to disk (see `flush`)
    - supports adding data without tensor ops, e.g.
      logger.add_scalar('cost', 5.0)
    - convenience method for saving sample sheets (grids of images)
    - can print info to stdout as well
  """

  def __init__(self, output_dir):
    """Creates a Logger.

    Args:
      output_dir: directory for TF event files, or None to only print to the
        console (no summaries are written in that case).
    """
    super(Logger, self).__init__()
    # Create the directory if it doesn't exist already
    if output_dir is not None:
      self._output_dir = output_dir
      tf.gfile.MakeDirs(output_dir)
      # Huge queue/flush limits: writing happens only when `flush()` is
      # called explicitly, never in the background.
      self._writer = tf.summary.FileWriter(
          output_dir, max_queue=9999999, flush_secs=9999999)
    else:
      print('Warning: Logger instantiated without an output dir. '
            'Only printing to console.')
      self._writer = None
    # Values accumulated per tag since the last print/pretty_print call.
    self._since_last_print = collections.defaultdict(list)
    # Summaries buffered until the next `flush()`.
    self._summary_buffer = []
    self._calls_to_pretty_print = 0

  def pretty_print(self, step, tags):
    """Pretty-prints the mean of the data accumulated for each tag.

    A header row with the tag names is re-printed every 50 calls.

    Args:
      step: current step, shown in the first column.
      tags: list of tags to print, in column order.
    """
    col_width = 12
    if self._calls_to_pretty_print % 50 == 0:
      print('step {}'.format(' '.join(
          [t.ljust(col_width) for t in tags])))
    self._calls_to_pretty_print += 1

    def to_string(v):
      return str(v).ljust(col_width)

    values = [np.mean(self._since_last_print[tag]) for tag in tags]
    values = [to_string(v) for v in values]
    print('{} {}'.format(str(step).ljust(col_width), ' '.join(values)))
    self._since_last_print = collections.defaultdict(list)

  def print(self, step):
    """Prints the data accumulated since the last call to print."""

    def to_string(x):
      # Scalar tags accumulate lists of values; image tags store a marker
      # string, which is printed verbatim.
      if isinstance(x, list):
        return np.mean(x)
      return x

    prints = [
        '{} {}'.format(name, to_string(val)) for name, val in sorted(
            self._since_last_print.items(), key=lambda x: x[0])
    ]
    to_print = ('iter {}\t{}'.format(step, ' '.join(prints)))
    print(to_print)
    self._since_last_print = collections.defaultdict(list)

  def flush(self):
    """Writes all buffered summaries to disk and clears the buffer."""
    if self._writer:
      for summary, step in self._summary_buffer:
        self._writer.add_summary(summary, step)
      self._writer.flush()
    self._summary_buffer = []

  def add_summary(self, summary, step):
    """Buffers a summary; it is written to disk on the next `flush()`."""
    self._summary_buffer.append((summary, step))

  def add_scalar(self, tag, value, step):
    """Add a scalar summary (also recorded for console printing)."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    self._since_last_print[tag].append(value)
    self.add_summary(summary, step)

  def add_image(self, tag, image, step):
    """Add an image summary. image: HWC uint8 numpy array."""
    # PNG encoding yields bytes, so a binary buffer is required;
    # the previous io.StringIO() raised a TypeError under Python 3.
    s = io.BytesIO()
    scipy.misc.imsave(s, image, format='png')
    summary_image = tf.Summary.Image(
        encoded_image_string=s.getvalue(),
        height=image.shape[0],
        width=image.shape[1])
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, image=summary_image)])
    self._since_last_print[tag] = '(image)'
    self.add_summary(summary, step)

  def add_image_grid(self, tag, images, step):
    """Add a grid of images. images: BHWC uint8 numpy array."""
    # Calculate number of rows / cols: the most-square grid whose row count
    # divides the batch size.
    n_samples = images.shape[0]
    n_rows = int(np.sqrt(n_samples))
    while n_samples % n_rows != 0:
      n_rows -= 1
    n_cols = n_samples // n_rows
    # Copy each image into its spot in the grid
    height, width = images[0].shape[:2]
    grid_image = np.zeros((height * n_rows, width * n_cols, 3), dtype='uint8')
    for n, image in enumerate(images):
      j = n // n_cols
      i = n % n_cols
      grid_image[j * height:j * height + height, i * width:i * width +
                 width] = image
    self.add_image(tag, grid_image, step)
| [
"io.StringIO",
"tensorflow.gfile.MakeDirs",
"numpy.zeros",
"collections.defaultdict",
"tensorflow.summary.FileWriter",
"numpy.mean",
"tensorflow.Summary.Value",
"numpy.sqrt"
] | [((1670, 1706), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (1693, 1706), False, 'import collections\n'), ((2388, 2424), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (2411, 2424), False, 'import collections\n'), ((2880, 2916), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (2903, 2916), False, 'import collections\n'), ((3594, 3607), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3605, 3607), False, 'import io\n'), ((4368, 4429), 'numpy.zeros', 'np.zeros', (['(height * n_rows, width * n_cols, 3)'], {'dtype': '"""uint8"""'}), "((height * n_rows, width * n_cols, 3), dtype='uint8')\n", (4376, 4429), True, 'import numpy as np\n'), ((1362, 1391), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['output_dir'], {}), '(output_dir)\n', (1379, 1391), True, 'import tensorflow as tf\n'), ((1413, 1485), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['output_dir'], {'max_queue': '(9999999)', 'flush_secs': '(9999999)'}), '(output_dir, max_queue=9999999, flush_secs=9999999)\n', (1434, 1485), True, 'import tensorflow as tf\n'), ((2209, 2219), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (2216, 2219), True, 'import numpy as np\n'), ((4156, 4174), 'numpy.sqrt', 'np.sqrt', (['n_samples'], {}), '(n_samples)\n', (4163, 4174), True, 'import numpy as np\n'), ((2572, 2582), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2579, 2582), True, 'import numpy as np\n'), ((3352, 3397), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag', 'simple_value': 'value'}), '(tag=tag, simple_value=value)\n', (3368, 3397), True, 'import tensorflow as tf\n'), ((3828, 3874), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag', 'image': 'summary_image'}), '(tag=tag, image=summary_image)\n', (3844, 3874), True, 'import tensorflow as tf\n')] |
import numpy as np
from chameleon.legacy.skfeature.utility.mutual_information import su_calculation
def merit_calculation(X, y):
    """
    Compute the CFS merit of the feature subset X given class labels y:
        merits = (k * rcf)/sqrt(k+k*(k-1)*rff)
    where rcf is the mean feature-class symmetrical uncertainty su(fi, y)
    and rff the mean feature-feature symmetrical uncertainty su(fi, fj).

    Input
    ----------
    X: {numpy array}, shape (n_samples, n_features)
        input data
    y: {numpy array}, shape (n_samples,)
        input class labels

    Output
    ----------
    merits: {float}
        merit of a feature subset X
    """
    n_features = X.shape[1]
    # rcf: sum of su(fi, y) over all features.
    # rff: su(fi, fj) over each unordered feature pair, counted twice.
    rcf = 0
    rff = 0
    for i in range(n_features):
        fi = X[:, i]
        rcf += su_calculation(fi, y)
        for j in range(i + 1, n_features):
            rff += su_calculation(fi, X[:, j])
    rff *= 2
    return rcf / np.sqrt(n_features + rff)
def cfs(X, y):
    """
    Correlation-based Feature Selection (CFS): greedily grows a feature
    subset, at each step adding the feature that maximizes the merit of
    the resulting subset, until the merit stops improving.

    Input
    -----
    X: {numpy array}, shape (n_samples, n_features)
        input data
    y: {numpy array}, shape (n_samples,)
        input class labels

    Output
    ------
    F: {numpy array}
        index of selected features

    Reference
    ---------
    <NAME> et al. "Advancing Feature Selection Research - ASU Feature Selection Repository" 2010.
    """
    n_features = X.shape[1]
    selected = []
    # merit_history records the best merit found at each greedy step.
    merit_history = []
    while True:
        best_merit = -100000000000
        best_idx = -1
        for candidate in range(n_features):
            if candidate in selected:
                continue
            # Score the subset extended by this candidate feature.
            trial = merit_calculation(X[:, selected + [candidate]], y)
            if trial > best_merit:
                best_merit = trial
                best_idx = candidate
        selected.append(best_idx)
        merit_history.append(best_merit)
        # Stop once the merit has been non-increasing for five steps.
        if len(merit_history) > 5 and (
                merit_history[-1] <= merit_history[-2]
                <= merit_history[-3] <= merit_history[-4]
                <= merit_history[-5]):
            break
    return np.array(selected)
| [
"numpy.array",
"chameleon.legacy.skfeature.utility.mutual_information.su_calculation",
"numpy.sqrt"
] | [((2268, 2279), 'numpy.array', 'np.array', (['F'], {}), '(F)\n', (2276, 2279), True, 'import numpy as np\n'), ((747, 768), 'chameleon.legacy.skfeature.utility.mutual_information.su_calculation', 'su_calculation', (['fi', 'y'], {}), '(fi, y)\n', (761, 768), False, 'from chameleon.legacy.skfeature.utility.mutual_information import su_calculation\n'), ((934, 959), 'numpy.sqrt', 'np.sqrt', (['(n_features + rff)'], {}), '(n_features + rff)\n', (941, 959), True, 'import numpy as np\n'), ((879, 901), 'chameleon.legacy.skfeature.utility.mutual_information.su_calculation', 'su_calculation', (['fi', 'fj'], {}), '(fi, fj)\n', (893, 901), False, 'from chameleon.legacy.skfeature.utility.mutual_information import su_calculation\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from torchvision import ops
# ops.DeformConv2d()
from .correlation_package.correlation import Correlation
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, activation=True):
    """Build a 2d convolution block, optionally followed by LeakyReLU(0.1)."""
    conv_layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                           stride=stride, padding=padding, dilation=dilation,
                           bias=True)
    layers = [conv_layer]
    if activation:
        layers.append(nn.LeakyReLU(0.1))
    return nn.Sequential(*layers)
def predict_flow(in_planes):
    """3x3 convolution head that regresses a 2-channel flow field."""
    return nn.Conv2d(in_planes, 2,
                     kernel_size=3, stride=1, padding=1, bias=True)
def predict_mask(in_planes):
    """3x3 convolution head that regresses a 1-channel (occlusion) mask."""
    return nn.Conv2d(in_planes, 1,
                     kernel_size=3, stride=1, padding=1, bias=True)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
    """Transposed convolution used for upsampling (defaults double H and W)."""
    return nn.ConvTranspose2d(in_planes, out_planes,
                              kernel_size, stride, padding, bias=True)
def deformable_conv(in_planes, out_planes, kernel_size=3, strides=1, padding=1, use_bias=True):
    """Thin wrapper around torchvision's DeformConv2d with this file's defaults."""
    return ops.DeformConv2d(in_planes, out_planes,
                            kernel_size, strides, padding, bias=use_bias)
def upsample_kernel2d(w, device):
    """Return a (1, 1, w, w) separable tent (bilinear) kernel for upsampling."""
    center = w // 2
    positions = torch.arange(w, dtype=torch.float32, device=device)
    tent = 1 - torch.abs(center - positions) / (center + 1)
    # Outer product of the 1-d tent with itself via broadcasting.
    kernel2d = tent.unsqueeze(0) * tent.unsqueeze(1)
    return kernel2d.view(1, 1, w, w)
def downsample_kernel2d(w, device):
    """Return a (1, 1, 2w+1, 2w+1) separable tent kernel for anti-aliased downsampling.

    Bug fix: the original built the 2-d kernel with
    ``kernel.repeat(w).view(w, -1)``, giving a (w, 2w+1) matrix — the 1-d
    kernel has 2w+1 taps, not w. Multiplying that by the (2w+1, 1) column
    only broadcasts when w == 1 and raises a RuntimeError for any w >= 2
    (i.e. any `Downsample` factor >= 4). Broadcasting the 1-d kernel
    against itself yields the intended (2w+1, 2w+1) outer product for every
    w, and is identical to the old result for w == 1.
    """
    size = w * 2 + 1
    taps = torch.arange(size, dtype=torch.float32, device=device)
    kernel = ((w + 1) - torch.abs(w - taps)) / (2 * w + 1)
    kernel2d = kernel.unsqueeze(0) * kernel.unsqueeze(1)
    return kernel2d.view(1, 1, size, size)
def Upsample(img, factor):
    """Bilinearly upsample `img` (B, C, H, W) by the integer `factor`.

    A factor of 1 returns the input unchanged.
    """
    if factor == 1:
        return img
    batch, channels, height, width = img.shape
    # Fold channels into the batch dim so one single-channel kernel serves all.
    flat = img.view(batch * channels, 1, height, width)
    flat = F.pad(flat, [0, 1, 0, 1], mode='replicate')
    kernel = upsample_kernel2d(factor * 2 - 1, img.device)
    out = F.conv_transpose2d(flat, kernel, stride=factor, padding=factor - 1)
    # Drop the extra row/column introduced by the replicate padding.
    out = out[:, :, :-1, :-1]
    return out.view(batch, channels, out.shape[2], out.shape[3])
def Downsample(img, factor):
    """Downsample `img` (B, C, H, W) by the integer `factor` with a tent kernel.

    The result is normalized by the kernel response to an all-ones image so
    border pixels are averaged correctly. A factor of 1 returns the input.
    """
    if factor == 1:
        return img
    batch, channels, height, width = img.shape
    # Fold channels into the batch dim so one single-channel kernel serves all.
    flat = img.view(batch * channels, 1, height, width)
    kernel = downsample_kernel2d(factor // 2, img.device)
    numerator = F.conv2d(flat, kernel, stride=factor, padding=factor // 2)
    denominator = F.conv2d(torch.ones_like(flat), kernel,
                           stride=factor, padding=factor // 2)
    _, _, out_h, out_w = numerator.shape
    numerator = numerator.view(batch, channels, out_h, out_w)
    denominator = denominator.view(batch, channels, out_h, out_w)
    return numerator / denominator
class MaskFlownet_S(nn.Module):
    """
    PWC-DC net: PWC-Net-style coarse-to-fine flow estimator with dilation
    convolutions and densenet connections.

    This is the first (mask-predicting) stage of MaskFlownet: at each pyramid
    level it predicts both a flow field and an occlusion mask, and warps the
    second image's features with a deformable convolution guided by the
    upsampled flow.
    """
    def __init__(self, config = None, **kwargs):
        """
        Build the network.

        config supplies: flow_multiplier (scales raw flow into pixel units,
        via self.scale = 20 * multiplier), deform_bias and upfeat_ch.
        md --- maximum displacement for the correlation cost volume
        (default: 4), applied after warping.
        """
        super(MaskFlownet_S, self).__init__()
        self.scale = 20. * config.network.flow_multiplier.get(1.)
        md = 4
        self.md = md
        # Downsampling factor of each pyramid level, coarsest (6) to finest (2).
        self.strides = [64, 32, 16, 8, 4]
        self.deform_bias = config.network.deform_bias.get(True)
        self.upfeat_ch = config.network.upfeat_ch.get([16, 16, 16, 16])
        # Feature pyramid encoder: 6 levels; each `a` conv halves resolution,
        # `b`/`c` refine at that resolution. Shared between both images.
        self.conv1a = conv(3, 16, kernel_size=3, stride=2)
        self.conv1b = conv(16, 16, kernel_size=3, stride=1)
        self.conv1c = conv(16, 16, kernel_size=3, stride=1)
        self.conv2a = conv(16, 32, kernel_size=3, stride=2)
        self.conv2b = conv(32, 32, kernel_size=3, stride=1)
        self.conv2c = conv(32, 32, kernel_size=3, stride=1)
        self.conv3a = conv(32, 64, kernel_size=3, stride=2)
        self.conv3b = conv(64, 64, kernel_size=3, stride=1)
        self.conv3c = conv(64, 64, kernel_size=3, stride=1)
        self.conv4a = conv(64, 96, kernel_size=3, stride=2)
        self.conv4b = conv(96, 96, kernel_size=3, stride=1)
        self.conv4c = conv(96, 96, kernel_size=3, stride=1)
        self.conv5a = conv(96, 128, kernel_size=3, stride=2)
        self.conv5b = conv(128, 128, kernel_size=3, stride=1)
        self.conv5c = conv(128, 128, kernel_size=3, stride=1)
        self.conv6a = conv(128, 196, kernel_size=3, stride=2)
        self.conv6b = conv(196, 196, kernel_size=3, stride=1)
        self.conv6c = conv(196, 196, kernel_size=3, stride=1)
        # Cost-volume correlation over a (2*md+1)^2 displacement window.
        self.corr = Correlation(pad_size=md, kernel_size=1, max_displacement=md, stride1=1, stride2=1, corr_multiply=1)
        self.leakyRELU = nn.LeakyReLU(0.1)
        # nd: correlation output channels; dd: cumulative channel counts of the
        # densely-connected decoder convs (each conv's input is concatenated
        # with all previous outputs).
        nd = (2*md+1)**2
        dd = np.cumsum([128,128,96,64,32])
        # Level-6 decoder: input is the correlation volume only.
        od = nd
        self.conv6_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv6_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
        self.conv6_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
        self.conv6_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
        self.conv6_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
        self.pred_flow6 = predict_flow(od + dd[4])
        self.pred_mask6 = predict_mask(od + dd[4])
        self.upfeat5 = deconv(od+dd[4], self.upfeat_ch[0], kernel_size=4, stride=2, padding=1)
        # self.deconv6 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        # self.upfeat6 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        # Level-5 decoder: correlation + level-5 features (128) + upsampled
        # feat (16) + flow (2) = nd+128+18 input channels.
        # od = nd+128+4
        od = nd+128+18
        self.conv5_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv5_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
        self.conv5_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
        self.conv5_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
        self.conv5_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
        self.pred_flow5 = predict_flow(od + dd[4])
        self.pred_mask5 = predict_mask(od + dd[4])
        self.upfeat4 = deconv(od+dd[4], self.upfeat_ch[1], kernel_size=4, stride=2, padding=1)
        # self.deconv5 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        # self.upfeat5 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        # Level-4 decoder.
        # od = nd+96+4
        od = nd+96+18
        self.conv4_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv4_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
        self.conv4_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
        self.conv4_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
        self.conv4_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
        self.pred_flow4 = predict_flow(od + dd[4])
        self.pred_mask4 = predict_mask(od + dd[4])
        self.upfeat3 = deconv(od+dd[4], self.upfeat_ch[2], kernel_size=4, stride=2, padding=1)
        # self.deconv4 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        # self.upfeat4 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        # Level-3 decoder.
        # od = nd+64+4
        od = nd+64+18
        self.conv3_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv3_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
        self.conv3_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
        self.conv3_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
        self.conv3_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
        self.pred_flow3 = predict_flow(od + dd[4])
        self.pred_mask3 = predict_mask(od + dd[4])
        self.upfeat2 = deconv(od+dd[4], self.upfeat_ch[3], kernel_size=4, stride=2, padding=1)
        # self.deconv3 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        # self.upfeat3 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        # Level-2 decoder (finest level; no mask head here).
        # od = nd+32+4
        od = nd+32+18
        self.conv2_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv2_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
        self.conv2_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
        self.conv2_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
        self.conv2_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
        self.pred_flow2 = predict_flow(od + dd[4])
        # Context network: stacked dilated convolutions that refine flow2.
        self.dc_conv1 = conv(od+dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1)
        self.dc_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
        self.dc_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
        self.dc_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
        self.dc_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
        self.dc_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
        self.dc_conv7 = predict_flow(32)
        # self.upfeat5 = deconv()
        # Deformable convolutions that warp image-2 features by the upsampled
        # flow (the flow is expanded into per-tap offsets in `forward`).
        self.deform5 = deformable_conv(128, 128)
        self.deform4 = deformable_conv(96, 96)
        self.deform3 = deformable_conv(64, 64)
        self.deform2 = deformable_conv(32, 32)
        # 1-channel-free projections of the upsampled feature (tradeoff term
        # added to the masked warped features).
        self.conv5f = conv(16, 128, kernel_size=3, stride=1, padding=1, activation=False)
        self.conv4f = conv(16, 96, kernel_size=3, stride=1, padding=1, activation=False)
        self.conv3f = conv(16, 64, kernel_size=3, stride=1, padding=1, activation=False)
        self.conv2f = conv(16, 32, kernel_size=3, stride=1, padding=1, activation=False)
        # NOTE(review): nn.init.kaiming_normal is the deprecated spelling of
        # nn.init.kaiming_normal_ and may be removed in newer torch releases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal(m.weight.data, mode='fan_in')
                if m.bias is not None:
                    m.bias.data.zero_()
    def warp(self, x, flo):
        """
        warp an image/tensor (im2) back to im1, according to the optical flow
        x: [B, C, H, W] (im2)
        flo: [B, 2, H, W] flow

        Out-of-frame samples are zeroed via a warped all-ones validity mask.
        """
        B, C, H, W = x.size()
        # mesh grid of absolute pixel coordinates; channel 0 = x, channel 1 = y
        xx = torch.arange(0, W).view(1,-1).repeat(H,1)
        yy = torch.arange(0, H).view(-1,1).repeat(1,W)
        xx = xx.view(1,1,H,W).repeat(B,1,1,1)
        yy = yy.view(1,1,H,W).repeat(B,1,1,1)
        grid = torch.cat((xx,yy),1).float()
        device = x.device
        grid = grid.to(device)
        # vgrid = Variable(grid) + flo
        # `flo` channels are flipped before being added to the (x, y) grid —
        # the flow appears to be stored (dy, dx); confirm against callers.
        vgrid = Variable(grid) + torch.flip(flo, [1])
        # scale grid to [-1,1] as required by grid_sample
        vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone() / max(W-1,1)-1.0
        vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone() / max(H-1,1)-1.0
        vgrid = vgrid.permute(0,2,3,1)
        # vgrid = vgrid.permute(0,2,3,1).clamp(-1.1, 1.1)
        output = nn.functional.grid_sample(x, vgrid, align_corners=True)
        # Warp an all-ones tensor to detect samples that fell outside the frame.
        mask = torch.autograd.Variable(torch.ones(x.size())).to(device)
        mask = nn.functional.grid_sample(mask, vgrid, align_corners=True)
        # if W==128:
        # np.save('mask.npy', mask.cpu().data.numpy())
        # np.save('warp.npy', output.cpu().data.numpy())
        # Binarize: anything not (almost) fully inside the frame is masked out.
        mask[mask<0.9999] = 0
        mask[mask>0] = 1
        return output*mask
    def forward(self, im1, im2):
        """
        Estimate flow from im1 to im2.

        Returns:
          predictions: flows at levels 6..2 (coarse to fine), in pixel units
            (multiplied by self.scale).
          occlusion_masks: [sigmoid(mask2)] — finest-level occlusion mask.
          srcs: [c1s, c2s, flows, c30, c40] — intermediates consumed by the
            second-stage MaskFlownet network.
        """
        # im1 = x[:,:3,:,:]
        # im2 = x[:,3:,:,:]
        # Shared encoder applied to both images (c1x: image 1, c2x: image 2).
        c11 = self.conv1c(self.conv1b(self.conv1a(im1)))
        c21 = self.conv1c(self.conv1b(self.conv1a(im2)))
        c12 = self.conv2c(self.conv2b(self.conv2a(c11)))
        c22 = self.conv2c(self.conv2b(self.conv2a(c21)))
        c13 = self.conv3c(self.conv3b(self.conv3a(c12)))
        c23 = self.conv3c(self.conv3b(self.conv3a(c22)))
        c14 = self.conv4c(self.conv4b(self.conv4a(c13)))
        c24 = self.conv4c(self.conv4b(self.conv4a(c23)))
        c15 = self.conv5c(self.conv5b(self.conv5a(c14)))
        c25 = self.conv5c(self.conv5b(self.conv5a(c24)))
        c16 = self.conv6c(self.conv6b(self.conv6a(c15)))
        c26 = self.conv6c(self.conv6b(self.conv6a(c25)))
        # Level 6 (coarsest): correlation only, dense decoder, first flow/mask.
        corr6 = self.corr(c16, c26)
        corr6 = self.leakyRELU(corr6)
        x = torch.cat((self.conv6_0(corr6), corr6),1)
        x = torch.cat((self.conv6_1(x), x),1)
        x = torch.cat((self.conv6_2(x), x),1)
        x = torch.cat((self.conv6_3(x), x),1)
        x = torch.cat((self.conv6_4(x), x),1)
        flow6 = self.pred_flow6(x)
        mask6 = self.pred_mask6(x)
        # Level 5: upsample flow/mask, expand flow into 9 per-tap offsets for
        # the 3x3 deformable conv, warp c25, mask+tradeoff, correlate, decode.
        feat5 = self.leakyRELU(self.upfeat5(x))
        flow5 = Upsample(flow6, 2)
        mask5 = Upsample(mask6, 2)
        warp5 = (flow5*self.scale/self.strides[1]).unsqueeze(1)
        warp5 = torch.repeat_interleave(warp5, 9, 1)
        S1, S2, S3, S4, S5 = warp5.shape
        warp5 = warp5.view(S1, S2*S3, S4, S5)
        warp5 = self.deform5(c25, warp5)
        tradeoff5 = feat5
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid.
        warp5 = (warp5 * F.sigmoid(mask5)) + self.conv5f(tradeoff5)
        warp5 = self.leakyRELU(warp5)
        corr5 = self.corr(c15, warp5)
        corr5 = self.leakyRELU(corr5)
        x = torch.cat((corr5, c15, feat5, flow5), 1)
        x = torch.cat((self.conv5_0(x), x),1)
        x = torch.cat((self.conv5_1(x), x),1)
        x = torch.cat((self.conv5_2(x), x),1)
        x = torch.cat((self.conv5_3(x), x),1)
        x = torch.cat((self.conv5_4(x), x),1)
        flow5 = flow5 + self.pred_flow5(x)
        mask5 = self.pred_mask5(x)
        # Level 4: same scheme as level 5.
        feat4 = self.leakyRELU(self.upfeat4(x))
        flow4 = Upsample(flow5, 2)
        mask4 = Upsample(mask5, 2)
        warp4 = (flow4*self.scale/self.strides[2]).unsqueeze(1)
        warp4 = torch.repeat_interleave(warp4, 9, 1)
        S1, S2, S3, S4, S5 = warp4.shape
        warp4 = warp4.view(S1, S2*S3, S4, S5)
        warp4 = self.deform4(c24, warp4)
        tradeoff4 = feat4
        warp4 = (warp4 * F.sigmoid(mask4)) + self.conv4f(tradeoff4)
        warp4 = self.leakyRELU(warp4)
        corr4 = self.corr(c14, warp4)
        corr4 = self.leakyRELU(corr4)
        x = torch.cat((corr4, c14, feat4, flow4), 1)
        x = torch.cat((self.conv4_0(x), x),1)
        x = torch.cat((self.conv4_1(x), x),1)
        x = torch.cat((self.conv4_2(x), x),1)
        x = torch.cat((self.conv4_3(x), x),1)
        x = torch.cat((self.conv4_4(x), x),1)
        flow4 = flow4 + self.pred_flow4(x)
        mask4 = self.pred_mask4(x)
        # Level 3: same scheme.
        feat3 = self.leakyRELU(self.upfeat3(x))
        flow3 = Upsample(flow4, 2)
        mask3 = Upsample(mask4, 2)
        warp3 = (flow3*self.scale/self.strides[3]).unsqueeze(1)
        warp3 = torch.repeat_interleave(warp3, 9, 1)
        S1, S2, S3, S4, S5 = warp3.shape
        warp3 = warp3.view(S1, S2*S3, S4, S5)
        warp3 = self.deform3(c23, warp3)
        tradeoff3 = feat3
        warp3 = (warp3 * F.sigmoid(mask3)) + self.conv3f(tradeoff3)
        warp3 = self.leakyRELU(warp3)
        corr3 = self.corr(c13, warp3)
        corr3 = self.leakyRELU(corr3)
        x = torch.cat((corr3, c13, feat3, flow3), 1)
        x = torch.cat((self.conv3_0(x), x),1)
        x = torch.cat((self.conv3_1(x), x),1)
        x = torch.cat((self.conv3_2(x), x),1)
        x = torch.cat((self.conv3_3(x), x),1)
        x = torch.cat((self.conv3_4(x), x),1)
        flow3 = flow3 + self.pred_flow3(x)
        mask3 = self.pred_mask3(x)
        # Level 2 (finest): same scheme; mask2 is only the upsampled mask3.
        feat2 = self.leakyRELU(self.upfeat2(x))
        flow2 = Upsample(flow3, 2)
        mask2 = Upsample(mask3, 2)
        warp2 = (flow2*self.scale/self.strides[4]).unsqueeze(1)
        warp2 = torch.repeat_interleave(warp2, 9, 1)
        S1, S2, S3, S4, S5 = warp2.shape
        warp2 = warp2.view(S1, S2*S3, S4, S5)
        warp2 = self.deform2(c22, warp2)
        tradeoff2 = feat2
        warp2 = (warp2 * F.sigmoid(mask2)) + self.conv2f(tradeoff2)
        warp2 = self.leakyRELU(warp2)
        corr2 = self.corr(c12, warp2)
        corr2 = self.leakyRELU(corr2)
        x = torch.cat((corr2, c12, feat2, flow2), 1)
        x = torch.cat((self.conv2_0(x), x),1)
        x = torch.cat((self.conv2_1(x), x),1)
        x = torch.cat((self.conv2_2(x), x),1)
        x = torch.cat((self.conv2_3(x), x),1)
        x = torch.cat((self.conv2_4(x), x),1)
        flow2 = flow2 + self.pred_flow2(x)
        # Context network refinement of the finest flow.
        x = self.dc_conv4(self.dc_conv3(self.dc_conv2(self.dc_conv1(x))))
        flow2 = flow2 + self.dc_conv7(self.dc_conv6(self.dc_conv5(x)))
        predictions = [flow * self.scale for flow in [flow6, flow5, flow4, flow3, flow2]]
        occlusion_masks = []
        occlusion_masks.append(F.sigmoid(mask2))
        c1s = [c11, c12, c13, c14, c15, c16]
        # NOTE(review): c12/c13 below look like a typo for c22/c23 (image-1
        # features in the image-2 pyramid) — confirm against the upstream
        # MaskFlownet implementation before changing; pretrained weights may
        # depend on the current wiring.
        c2s = [c21, c12, c13, c24, c25, c26]
        flows = [flow6, flow5, flow4, flow3, flow2]
        # Inputs for the second stage: im1 padded with a zero channel, and im2
        # warped by the final flow padded with the centered occlusion mask.
        mask0 = Upsample(mask2, 4)
        mask0 = F.sigmoid(mask0) - 0.5
        c30 = im1
        c40 = self.warp(im2, Upsample(flow2, 4)*self.scale)
        c30 = torch.cat((c30, torch.zeros_like(mask0)), 1)
        c40 = torch.cat((c40, mask0), 1)
        srcs = [c1s, c2s, flows, c30, c40]
        return predictions, occlusion_masks, srcs
class MaskFlownet(nn.Module):
def __init__(self, config, **kwargs):
super(MaskFlownet, self).__init__(**kwargs)
self.strides = [64, 32, 16, 8, 4]
self.md = 2
self.scale = 20. * config.network.flow_multiplier.get(1.)
self.deform_bias = config.network.deform_bias.get(True)
self.upfeat_ch = config.network.upfeat_ch.get([16, 16, 16, 16])
self.MaskFlownet_S = MaskFlownet_S(config)
self.activate = nn.LeakyReLU(0.1)
self.conv1x = conv(4, 16, stride=2)
self.conv1y = conv(16, 16, stride=1)
self.conv1z = conv(16, 16, stride=1)
self.conv2x = conv(16, 32, stride=2)
self.conv2y = conv(32, 32, stride=1)
self.conv2z = conv(32, 32, stride=1)
self.conv3x = conv(32, 64, stride=2)
self.conv3y = conv(64, 64, stride=1)
self.conv3z = conv(64, 64, stride=1)
self.conv4x = conv(64, 96, stride=2)
self.conv4y = conv(96, 96, stride=1)
self.conv4z = conv(96, 96, stride=1)
self.conv5x = conv(96, 128, stride=2)
self.conv5y = conv(128, 128, stride=1)
self.conv5z = conv(128, 128, stride=1)
self.conv6x = conv(128, 196, stride=2)
self.conv6y = conv(196, 196, stride=1)
self.conv6z = conv(196, 196, stride=1)
self.leakyRELU = nn.LeakyReLU(0.1)
self.corr = Correlation(pad_size=self.md, kernel_size=1, max_displacement=self.md, stride1=1, stride2=1, corr_multiply=1)
nd = (2*self.md+1)**2
dd = np.cumsum([128,128,96,64,32])
od = nd+nd+2
self.conv6_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv6_1 = conv(od+dd[0], 128, kernel_size=3, stride=1)
self.conv6_2 = conv(od+dd[1], 96, kernel_size=3, stride=1)
self.conv6_3 = conv(od+dd[2], 64, kernel_size=3, stride=1)
self.conv6_4 = conv(od+dd[3], 32, kernel_size=3, stride=1)
self.pred_flow6 = predict_flow(od + dd[4])
self.upfeat5 = deconv(od+dd[4], self.upfeat_ch[0], kernel_size=4, stride=2, padding=1)
# od = nd+128+4
od = nd+nd+128+16+2+2
self.conv5_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv5_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv5_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv5_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv5_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.pred_flow5 = predict_flow(od + dd[4])
self.upfeat4 = deconv(od+dd[4], self.upfeat_ch[1], kernel_size=4, stride=2, padding=1)
# self.deconv5 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
# self.upfeat5 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
# od = nd+96+4
# od = nd+96+18
od = nd+nd+96+16+2+2
self.conv4_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv4_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv4_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv4_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv4_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.pred_flow4 = predict_flow(od + dd[4])
self.upfeat3 = deconv(od+dd[4], self.upfeat_ch[2], kernel_size=4, stride=2, padding=1)
# self.deconv4 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
# self.upfeat4 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
# od = nd+64+4
# od = nd+64+18
od = nd+nd+64+16+2+2
self.conv3_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv3_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv3_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv3_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv3_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.pred_flow3 = predict_flow(od + dd[4])
self.upfeat2 = deconv(od+dd[4], self.upfeat_ch[3], kernel_size=4, stride=2, padding=1)
# self.deconv3 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
# self.upfeat3 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
# od = nd+32+4
# od = nd+32+18
od = nd+nd+32+16+2+2
self.conv2_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv2_1 = conv(od+dd[0],128, kernel_size=3, stride=1)
self.conv2_2 = conv(od+dd[1],96, kernel_size=3, stride=1)
self.conv2_3 = conv(od+dd[2],64, kernel_size=3, stride=1)
self.conv2_4 = conv(od+dd[3],32, kernel_size=3, stride=1)
self.pred_flow2 = predict_flow(od + dd[4])
self.dc_conv1 = conv(od+dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv7 = predict_flow(32)
# self.upfeat5 = deconv()
self.deform6 = deformable_conv(196, 196)
self.deform5 = deformable_conv(128, 128)
self.deform4 = deformable_conv(96, 96)
self.deform3 = deformable_conv(64, 64)
self.deform2 = deformable_conv(32, 32)
def warp(self, x, flo):
    """Warp an image/tensor (im2) back to im1 according to the optical flow.

    Args:
        x: [B, C, H, W] tensor (im2).
        flo: [B, 2, H, W] flow; channels are flipped to (x, y) order below.

    Returns:
        The warped tensor, with out-of-range sample locations zeroed out.
    """
    B, C, H, W = x.size()
    # Build an identity sampling grid of integer pixel coordinates (x, y).
    xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
    yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
    xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
    yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
    # Fixed: keep everything on the input's device. The old code called
    # .cuda() unconditionally on the mask, crashing on CPU-only inputs.
    grid = torch.cat((xx, yy), 1).float().to(x.device)
    # Flow is stored (y, x); flip the channel dim to (x, y) before adding.
    vgrid = grid + torch.flip(flo, [1])
    # Normalize sampling positions to [-1, 1] as required by grid_sample.
    vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
    vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
    vgrid = vgrid.permute(0, 2, 3, 1).clamp(-1.1, 1.1)
    output = nn.functional.grid_sample(x, vgrid, align_corners=True)
    # Sample a tensor of ones to detect locations that fell out of bounds.
    mask = torch.ones_like(x)
    mask = nn.functional.grid_sample(mask, vgrid, align_corners=True)
    mask[mask < 0.9999] = 0
    mask[mask > 0] = 1
    return output * mask
def forward(self, im1, im2):
    """Coarse-to-fine flow refinement on top of MaskFlownet_S.

    Runs the first-stage network on the image pair, then refines its
    per-level flow estimates from the coarsest pyramid level (6) down to
    the finest (2). Each level warps second-image features with a
    deformable convolution driven by the upsampled flow, builds two
    correlation volumes, runs a dense conv stack, and adds a residual
    flow prediction. A dilated-conv context network refines flow2 last.

    NOTE(review): relies on self.conv{1..6}{x,y,z}, self.corr,
    self.deform{2..6}, self.conv{2..6}_{0..4}, self.pred_flow{2..6},
    self.upfeat{2..5}, self.scale and self.strides being defined in
    __init__ (partially outside this view) — confirm there.
    """
    # im1 = x[:,:3,:,:]
    # im2 = x[:,3:,:,:]
    # Stage 1: feature pyramids for both images, per-level flows, and the
    # two auxiliary feature sources c30/c40.
    _, _, srcs = self.MaskFlownet_S(im1, im2)
    c1s, c2s, flows, c30, c40 = srcs
    c11, c12, c13, c14, c15, c16 = c1s
    c21, c22, c23, c24, c25, c26 = c2s
    # Build two extra 6-level pyramids from c30/c40 with shared conv stacks.
    c31 = self.conv1z(self.conv1y(self.conv1x(c30)))
    c32 = self.conv2z(self.conv2y(self.conv2x(c31)))
    c33 = self.conv3z(self.conv3y(self.conv3x(c32)))
    c34 = self.conv4z(self.conv4y(self.conv4x(c33)))
    c35 = self.conv5z(self.conv5y(self.conv5x(c34)))
    c36 = self.conv6z(self.conv6y(self.conv6x(c35)))
    c41 = self.conv1z(self.conv1y(self.conv1x(c40)))
    c42 = self.conv2z(self.conv2y(self.conv2x(c41)))
    c43 = self.conv3z(self.conv3y(self.conv3x(c42)))
    c44 = self.conv4z(self.conv4y(self.conv4x(c43)))
    c45 = self.conv5z(self.conv5y(self.conv5x(c44)))
    c46 = self.conv6z(self.conv6y(self.conv6x(c45)))
    # ---- level 6 (coarsest) ----
    flow6 = flows[0]
    # Replicate the scaled flow 9x so each deformable-conv tap shares it.
    warp6u = (flow6*self.scale/self.strides[0]).unsqueeze(1)
    warp6u = torch.repeat_interleave(warp6u, 9, 1)
    S1, S2, S3, S4, S5 = warp6u.shape
    warp6u = warp6u.view(S1, S2*S3, S4, S5)
    warp6u = self.deform6(c26, warp6u)
    warp6u = self.leakyRELU(warp6u)
    corr6u = self.leakyRELU(self.corr(c16, warp6u))
    warp6v = c46
    corr6v = self.leakyRELU(self.corr(c36, warp6v))
    x = torch.cat((corr6u, corr6v, flow6),1)
    # DenseNet-style stack: each conv's output is concatenated onto x.
    x = torch.cat((self.conv6_0(x), x),1)
    x = torch.cat((self.conv6_1(x), x),1)
    x = torch.cat((self.conv6_2(x), x),1)
    x = torch.cat((self.conv6_3(x), x),1)
    x = torch.cat((self.conv6_4(x), x),1)
    # Residual flow update.
    flow6 = flow6 + self.pred_flow6(x)
    feat5 = self.leakyRELU(self.upfeat5(x))
    # ---- level 5 ----
    flow5 = Upsample(flow6, 2)
    warp5u = (flow5*self.scale/self.strides[1]).unsqueeze(1)
    warp5u = torch.repeat_interleave(warp5u, 9, 1)
    S1, S2, S3, S4, S5 = warp5u.shape
    warp5u = warp5u.view(S1, S2*S3, S4, S5)
    warp5u = self.deform5(c25, warp5u)
    warp5u = self.leakyRELU(warp5u)
    corr5u = self.leakyRELU(self.corr(c15, warp5u))
    warp5v = c45
    corr5v = self.leakyRELU(self.corr(c35, warp5v))
    x = torch.cat((c15, feat5, corr5u, corr5v, flow5, flows[1]),1)
    x = torch.cat((self.conv5_0(x), x),1)
    x = torch.cat((self.conv5_1(x), x),1)
    x = torch.cat((self.conv5_2(x), x),1)
    x = torch.cat((self.conv5_3(x), x),1)
    x = torch.cat((self.conv5_4(x), x),1)
    flow5 = flow5 + self.pred_flow5(x)
    feat4 = self.leakyRELU(self.upfeat4(x))
    # ---- level 4 ----
    flow4 = Upsample(flow5, 2)
    warp4u = (flow4*self.scale/self.strides[2]).unsqueeze(1)
    warp4u = torch.repeat_interleave(warp4u, 9, 1)
    S1, S2, S3, S4, S5 = warp4u.shape
    warp4u = warp4u.view(S1, S2*S3, S4, S5)
    warp4u = self.deform4(c24, warp4u)
    warp4u = self.leakyRELU(warp4u)
    corr4u = self.leakyRELU(self.corr(c14, warp4u))
    warp4v = c44
    corr4v = self.leakyRELU(self.corr(c34, warp4v))
    x = torch.cat((c14, feat4, corr4u, corr4v, flow4, flows[2]),1)
    x = torch.cat((self.conv4_0(x), x),1)
    x = torch.cat((self.conv4_1(x), x),1)
    x = torch.cat((self.conv4_2(x), x),1)
    x = torch.cat((self.conv4_3(x), x),1)
    x = torch.cat((self.conv4_4(x), x),1)
    flow4 = flow4 + self.pred_flow4(x)
    feat3 = self.leakyRELU(self.upfeat3(x))
    # ---- level 3 ----
    flow3 = Upsample(flow4, 2)
    warp3u = (flow3*self.scale/self.strides[3]).unsqueeze(1)
    warp3u = torch.repeat_interleave(warp3u, 9, 1)
    S1, S2, S3, S4, S5 = warp3u.shape
    warp3u = warp3u.view(S1, S2*S3, S4, S5)
    warp3u = self.deform3(c23, warp3u)
    warp3u = self.leakyRELU(warp3u)
    corr3u = self.leakyRELU(self.corr(c13, warp3u))
    warp3v = c43
    corr3v = self.leakyRELU(self.corr(c33, warp3v))
    x = torch.cat((c13, feat3, corr3u, corr3v, flow3, flows[3]),1)
    x = torch.cat((self.conv3_0(x), x),1)
    x = torch.cat((self.conv3_1(x), x),1)
    x = torch.cat((self.conv3_2(x), x),1)
    x = torch.cat((self.conv3_3(x), x),1)
    x = torch.cat((self.conv3_4(x), x),1)
    flow3 = flow3 + self.pred_flow3(x)
    feat2 = self.leakyRELU(self.upfeat2(x))
    # ---- level 2 (finest refined level) ----
    flow2 = Upsample(flow3, 2)
    warp2u = (flow2*self.scale/self.strides[4]).unsqueeze(1)
    warp2u = torch.repeat_interleave(warp2u, 9, 1)
    S1, S2, S3, S4, S5 = warp2u.shape
    warp2u = warp2u.view(S1, S2*S3, S4, S5)
    warp2u = self.deform2(c22, warp2u)
    warp2u = self.leakyRELU(warp2u)
    corr2u = self.leakyRELU(self.corr(c12, warp2u))
    warp2v = c42
    corr2v = self.leakyRELU(self.corr(c32, warp2v))
    x = torch.cat((c12, feat2, corr2u, corr2v, flow2, flows[4]),1)
    x = torch.cat((self.conv2_0(x), x),1)
    x = torch.cat((self.conv2_1(x), x),1)
    x = torch.cat((self.conv2_2(x), x),1)
    x = torch.cat((self.conv2_3(x), x),1)
    x = torch.cat((self.conv2_4(x), x),1)
    flow2 = flow2 + self.pred_flow2(x)
    # Dilated-conv context network: final residual refinement of flow2.
    x = self.dc_conv4(self.dc_conv3(self.dc_conv2(self.dc_conv1(x))))
    flow2 = flow2 + self.dc_conv7(self.dc_conv6(self.dc_conv5(x)))
    # Rescale every level's flow back to image units.
    preds = [flow * self.scale for flow in [flow6, flow5, flow4, flow3, flow2]]
    visuals = []
    visuals.append(flow2[:,:1])
    return preds, visuals, []
class EpeLoss(nn.Module):
    """Average endpoint error (EPE) per batch element.

    Squared differences are summed over the channel dimension,
    square-rooted (optionally stabilized by ``eps``) and averaged over
    all remaining positions, yielding one scalar per batch element.
    """
    def __init__(self, eps=0):
        super(EpeLoss, self).__init__()
        self.eps = eps

    def forward(self, pred, label):
        delta = pred - label
        pointwise = torch.sqrt(delta.pow(2).sum(1) + self.eps)
        return pointwise.reshape(pointwise.shape[0], -1).mean(1)
class EpeLossWithMask(nn.Module):
    """Masked endpoint error.

    When ``q`` is set, a robust penalty ``(|pred - label|_1 + eps) ** q``
    replaces the L2 norm. Per-pixel errors are weighted by ``mask`` and
    normalized by the mask sum of each batch element.
    """
    def __init__(self, eps=1e-8, q=None):
        super(EpeLossWithMask, self).__init__()
        self.eps = eps
        self.q = q

    def forward(self, pred, label, mask):
        diff = pred - label
        if self.q is None:
            per_pixel = (diff.pow(2).sum(1) + self.eps).sqrt()
        else:
            per_pixel = (diff.abs().sum(1) + self.eps) ** self.q
        weighted = per_pixel * mask.squeeze(1)
        numerator = weighted.view(weighted.shape[0], -1).sum(1)
        denominator = mask.view(mask.shape[0], -1).sum(1)
        return numerator / denominator
class MultiscaleEpe(nn.Module):
    """Weighted sum of masked EPE losses over several pyramid scales.

    ``match`` selects how predictions and ground truth are brought to a
    common resolution: 'upsampling' resizes each prediction up to the
    ground-truth resolution; 'downsampling' resizes the ground truth
    (and mask) down to each prediction. Any other value raises
    NotImplementedError when the loss is evaluated.
    """
    def __init__(self, scales, weights, match, eps=1e-8, q=None):
        super(MultiscaleEpe, self).__init__()
        self.scales = scales
        self.weights = weights
        self.match = match
        self.eps = eps
        self.q = q

    def forward(self, flow, mask, *predictions):
        if self.match not in ('upsampling', 'downsampling'):
            raise NotImplementedError
        total = 0
        for pred, weight, scale in zip(predictions, self.weights, self.scales):
            epe = EpeLossWithMask(eps=self.eps, q=self.q)
            if self.match == 'upsampling':
                term = epe(Upsample(pred, scale), flow, mask)
            else:
                term = epe(pred, Downsample(flow, scale), Downsample(mask, scale))
            total = total + term * weight
        return total
| [
"torch.ones_like",
"torch.repeat_interleave",
"torch.flip",
"torch.nn.ConvTranspose2d",
"torch.nn.functional.grid_sample",
"torch.zeros_like",
"torch.autograd.Variable",
"torch.nn.functional.conv_transpose2d",
"torch.nn.Conv2d",
"torch.nn.functional.conv2d",
"torch.cat",
"numpy.cumsum",
"tor... | [((801, 871), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(in_planes, 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (810, 871), True, 'import torch.nn as nn\n'), ((914, 984), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(in_planes, 1, kernel_size=3, stride=1, padding=1, bias=True)\n', (923, 984), True, 'import torch.nn as nn\n'), ((1069, 1155), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_planes', 'out_planes', 'kernel_size', 'stride', 'padding'], {'bias': '(True)'}), '(in_planes, out_planes, kernel_size, stride, padding,\n bias=True)\n', (1087, 1155), True, 'import torch.nn as nn\n'), ((1261, 1351), 'torchvision.ops.DeformConv2d', 'ops.DeformConv2d', (['in_planes', 'out_planes', 'kernel_size', 'strides', 'padding'], {'bias': 'use_bias'}), '(in_planes, out_planes, kernel_size, strides, padding, bias\n =use_bias)\n', (1277, 1351), False, 'from torchvision import ops\n'), ((2006, 2054), 'torch.nn.functional.pad', 'F.pad', (['batch_img', '[0, 1, 0, 1]'], {'mode': '"""replicate"""'}), "(batch_img, [0, 1, 0, 1], mode='replicate')\n", (2011, 2054), True, 'import torch.nn.functional as F\n'), ((2131, 2203), 'torch.nn.functional.conv_transpose2d', 'F.conv_transpose2d', (['batch_img', 'kernel'], {'stride': 'factor', 'padding': '(factor - 1)'}), '(batch_img, kernel, stride=factor, padding=factor - 1)\n', (2149, 2203), True, 'import torch.nn.functional as F\n'), ((2545, 2608), 'torch.nn.functional.conv2d', 'F.conv2d', (['batch_img', 'kernel'], {'stride': 'factor', 'padding': '(factor // 2)'}), '(batch_img, kernel, stride=factor, padding=factor // 2)\n', (2553, 2608), True, 'import torch.nn.functional as F\n'), ((2633, 2659), 'torch.ones_like', 'torch.ones_like', (['batch_img'], {}), '(batch_img)\n', (2648, 2659), False, 'import torch\n'), ((4742, 4759), 
'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4754, 4759), True, 'import torch.nn as nn\n'), ((4799, 4832), 'numpy.cumsum', 'np.cumsum', (['[128, 128, 96, 64, 32]'], {}), '([128, 128, 96, 64, 32])\n', (4808, 4832), True, 'import numpy as np\n'), ((10536, 10591), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['x', 'vgrid'], {'align_corners': '(True)'}), '(x, vgrid, align_corners=True)\n', (10561, 10591), True, 'import torch.nn as nn\n'), ((10679, 10737), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['mask', 'vgrid'], {'align_corners': '(True)'}), '(mask, vgrid, align_corners=True)\n', (10704, 10737), True, 'import torch.nn as nn\n'), ((12317, 12353), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp5', '(9)', '(1)'], {}), '(warp5, 9, 1)\n', (12340, 12353), False, 'import torch\n'), ((12702, 12742), 'torch.cat', 'torch.cat', (['(corr5, c15, feat5, flow5)', '(1)'], {}), '((corr5, c15, feat5, flow5), 1)\n', (12711, 12742), False, 'import torch\n'), ((13250, 13286), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp4', '(9)', '(1)'], {}), '(warp4, 9, 1)\n', (13273, 13286), False, 'import torch\n'), ((13635, 13675), 'torch.cat', 'torch.cat', (['(corr4, c14, feat4, flow4)', '(1)'], {}), '((corr4, c14, feat4, flow4), 1)\n', (13644, 13675), False, 'import torch\n'), ((14183, 14219), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp3', '(9)', '(1)'], {}), '(warp3, 9, 1)\n', (14206, 14219), False, 'import torch\n'), ((14568, 14608), 'torch.cat', 'torch.cat', (['(corr3, c13, feat3, flow3)', '(1)'], {}), '((corr3, c13, feat3, flow3), 1)\n', (14577, 14608), False, 'import torch\n'), ((15116, 15152), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp2', '(9)', '(1)'], {}), '(warp2, 9, 1)\n', (15139, 15152), False, 'import torch\n'), ((15501, 15541), 'torch.cat', 'torch.cat', (['(corr2, c12, feat2, flow2)', '(1)'], {}), '((corr2, c12, feat2, flow2), 1)\n', (15510, 15541), 
False, 'import torch\n'), ((16497, 16523), 'torch.cat', 'torch.cat', (['(c40, mask0)', '(1)'], {}), '((c40, mask0), 1)\n', (16506, 16523), False, 'import torch\n'), ((17083, 17100), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (17095, 17100), True, 'import torch.nn as nn\n'), ((17962, 17979), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (17974, 17979), True, 'import torch.nn as nn\n'), ((18157, 18190), 'numpy.cumsum', 'np.cumsum', (['[128, 128, 96, 64, 32]'], {}), '([128, 128, 96, 64, 32])\n', (18166, 18190), True, 'import numpy as np\n'), ((23071, 23126), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['x', 'vgrid'], {'align_corners': '(True)'}), '(x, vgrid, align_corners=True)\n', (23096, 23126), True, 'import torch.nn as nn\n'), ((23210, 23268), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['mask', 'vgrid'], {'align_corners': '(True)'}), '(mask, vgrid, align_corners=True)\n', (23235, 23268), True, 'import torch.nn as nn\n'), ((24551, 24588), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp6u', '(9)', '(1)'], {}), '(warp6u, 9, 1)\n', (24574, 24588), False, 'import torch\n'), ((24907, 24944), 'torch.cat', 'torch.cat', (['(corr6u, corr6v, flow6)', '(1)'], {}), '((corr6u, corr6v, flow6), 1)\n', (24916, 24944), False, 'import torch\n'), ((25383, 25420), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp5u', '(9)', '(1)'], {}), '(warp5u, 9, 1)\n', (25406, 25420), False, 'import torch\n'), ((25739, 25798), 'torch.cat', 'torch.cat', (['(c15, feat5, corr5u, corr5v, flow5, flows[1])', '(1)'], {}), '((c15, feat5, corr5u, corr5v, flow5, flows[1]), 1)\n', (25748, 25798), False, 'import torch\n'), ((26237, 26274), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp4u', '(9)', '(1)'], {}), '(warp4u, 9, 1)\n', (26260, 26274), False, 'import torch\n'), ((26593, 26652), 'torch.cat', 'torch.cat', (['(c14, feat4, corr4u, corr4v, flow4, flows[2])', '(1)'], {}), '((c14, 
feat4, corr4u, corr4v, flow4, flows[2]), 1)\n', (26602, 26652), False, 'import torch\n'), ((27091, 27128), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp3u', '(9)', '(1)'], {}), '(warp3u, 9, 1)\n', (27114, 27128), False, 'import torch\n'), ((27447, 27506), 'torch.cat', 'torch.cat', (['(c13, feat3, corr3u, corr3v, flow3, flows[3])', '(1)'], {}), '((c13, feat3, corr3u, corr3v, flow3, flows[3]), 1)\n', (27456, 27506), False, 'import torch\n'), ((27945, 27982), 'torch.repeat_interleave', 'torch.repeat_interleave', (['warp2u', '(9)', '(1)'], {}), '(warp2u, 9, 1)\n', (27968, 27982), False, 'import torch\n'), ((28301, 28360), 'torch.cat', 'torch.cat', (['(c12, feat2, corr2u, corr2v, flow2, flows[4])', '(1)'], {}), '((c12, feat2, corr2u, corr2v, flow2, flows[4]), 1)\n', (28310, 28360), False, 'import torch\n'), ((390, 513), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=True)\n', (399, 513), True, 'import torch.nn as nn\n'), ((545, 562), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (557, 562), True, 'import torch.nn as nn\n'), ((616, 739), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=True)\n', (625, 739), True, 'import torch.nn as nn\n'), ((10213, 10227), 'torch.autograd.Variable', 'Variable', (['grid'], {}), '(grid)\n', (10221, 10227), False, 'from torch.autograd import Variable\n'), ((10230, 10250), 'torch.flip', 'torch.flip', (['flo', '[1]'], {}), '(flo, [1])\n', (10240, 10250), False, 'import torch\n'), ((16112, 16128), 'torch.nn.functional.sigmoid', 
'F.sigmoid', (['mask2'], {}), '(mask2)\n', (16121, 16128), True, 'import torch.nn.functional as F\n'), ((16323, 16339), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['mask0'], {}), '(mask0)\n', (16332, 16339), True, 'import torch.nn.functional as F\n'), ((22748, 22762), 'torch.autograd.Variable', 'Variable', (['grid'], {}), '(grid)\n', (22756, 22762), False, 'from torch.autograd import Variable\n'), ((22765, 22785), 'torch.flip', 'torch.flip', (['flo', '[1]'], {}), '(flo, [1])\n', (22775, 22785), False, 'import torch\n'), ((9479, 9531), 'torch.nn.init.kaiming_normal', 'nn.init.kaiming_normal', (['m.weight.data'], {'mode': '"""fan_in"""'}), "(m.weight.data, mode='fan_in')\n", (9501, 9531), True, 'import torch.nn as nn\n'), ((10071, 10093), 'torch.cat', 'torch.cat', (['(xx, yy)', '(1)'], {}), '((xx, yy), 1)\n', (10080, 10093), False, 'import torch\n'), ((12533, 12549), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['mask5'], {}), '(mask5)\n', (12542, 12549), True, 'import torch.nn.functional as F\n'), ((13466, 13482), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['mask4'], {}), '(mask4)\n', (13475, 13482), True, 'import torch.nn.functional as F\n'), ((14399, 14415), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['mask3'], {}), '(mask3)\n', (14408, 14415), True, 'import torch.nn.functional as F\n'), ((15332, 15348), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['mask2'], {}), '(mask2)\n', (15341, 15348), True, 'import torch.nn.functional as F\n'), ((16454, 16477), 'torch.zeros_like', 'torch.zeros_like', (['mask0'], {}), '(mask0)\n', (16470, 16477), False, 'import torch\n'), ((22610, 22632), 'torch.cat', 'torch.cat', (['(xx, yy)', '(1)'], {}), '((xx, yy), 1)\n', (22619, 22632), False, 'import torch\n'), ((1429, 1480), 'torch.arange', 'torch.arange', (['w'], {'dtype': 'torch.float32', 'device': 'device'}), '(w, dtype=torch.float32, device=device)\n', (1441, 1480), False, 'import torch\n'), ((1666, 1725), 'torch.arange', 'torch.arange', (['(w * 2 + 1)'], {'dtype': 
'torch.float32', 'device': 'device'}), '(w * 2 + 1, dtype=torch.float32, device=device)\n', (1678, 1725), False, 'import torch\n'), ((9867, 9885), 'torch.arange', 'torch.arange', (['(0)', 'W'], {}), '(0, W)\n', (9879, 9885), False, 'import torch\n'), ((9922, 9940), 'torch.arange', 'torch.arange', (['(0)', 'H'], {}), '(0, H)\n', (9934, 9940), False, 'import torch\n'), ((22406, 22424), 'torch.arange', 'torch.arange', (['(0)', 'W'], {}), '(0, W)\n', (22418, 22424), False, 'import torch\n'), ((22461, 22479), 'torch.arange', 'torch.arange', (['(0)', 'H'], {}), '(0, H)\n', (22473, 22479), False, 'import torch\n')] |
import os
import ast
import pandas as pd
import numpy as np
import networkx as nx
import json
path = os.path.dirname(os.path.abspath(__file__))
def get_presynaptic(X, G, threshold=0.05):
    """ Returns a list of predecessors for a group of neurons.
    # Arguments
        X (list): List of nodes to get the predecessors of.
        G (networkx Graph): Connectivity graph to use.
        threshold (float): Minimum edge weight for a predecessor to count.
    # Returns
        list: List of unique predecessors.
    """
    found = {
        pred
        for node in X if node in G.nodes()
        for pred in G.predecessors(node)
        if G.edges[pred, node]['weight'] >= threshold
    }
    return list(found)
def get_search(names, verb='show'):
    """ Builds (and echoes) an NLP query that retrieves the given neurons.
    # Arguments
        names (list): List of neurons to retrieve.
        verb (str): Verb to use. Defaults to 'show'. Can be 'show', 'add', 'keep' or 'remove'.
    # Returns
        str: NLP query string.
    """
    id_list = ', '.join(str(name) for name in names)
    query = '{} /:referenceId:[{}]'.format(verb, id_list)
    print(query)
    return query
def get_set_color(names, color):
    """ Builds (and echoes) an NLP query that colors the given neurons.
    # Arguments
        names (list): List of neurons to color.
        color (str): Color to use. Refer to the color name list in https://en.wikipedia.org/wiki/Web_colors.
    # Returns
        str: NLP query string.
    """
    id_list = ', '.join(str(name) for name in names)
    query = 'color /:referenceId:[{}] {}'.format(id_list, color)
    print(query)
    return query
def load_normalized_hemibrain():
    """ Loads the input-normalized Hemibrain connectome from disk.
    # Returns
        NetworkX Graph: Connectivity graph for Hemibrain in which edges are weighted based on percentage of inputs.
        dict: Dict of neuron labels that can be searched with get_field or get_field_with_keys functions.
    """
    graph = nx.read_gexf('hemi12_G_normalized.gexf')
    labels = np.load('hemi_neurons.npy', allow_pickle=True).item()
    return graph, labels
def search(inp, neuron_labels):
    """ Utility function to retrieve neuron names whose label contains a keyword.
    # Arguments
        inp (str): Keyword to use for filtering.
        neuron_labels (dict): Dictionary of neuron labels, for example generated with load_normalized_hemibrain.
    # Returns
        list: Matching neuron names.
    """
    matches = []
    for name, label in neuron_labels.items():
        if inp in label:
            matches.append(name)
    return matches
def threshold_graph(G, threshold=0.05):
    """ Remove every edge whose weight is below a threshold (in place).
    # Arguments
        G (NetworkX Graph): Graph to filter.
        threshold (float): Threshold value to use.
    # Returns
        NetworkX Graph: The same graph, with sub-threshold edges removed.
    """
    # Collect first, then remove, so we never mutate while iterating.
    weak_edges = [
        (u, v)
        for u, v, data in list(G.edges(data=True))
        if data['weight'] < threshold
    ]
    for u, v in weak_edges:
        G.remove_edge(u, v)
    return G
def get_field(X, neuron_labels, i=0):
    """ Return one label field for each of a subset of neuron names.
    # Arguments
        X (list): List of neuron names to retrieve the field of.
        neuron_labels (dict): Dictionary of neuron labels, for example generated with load_normalized_hemibrain.
        i (int): Label field index to retrieve. Label indices in order: status, statusLabel, cropped, instance, notes, type.
    # Returns
        list: The i-th '/'-separated field of each requested label.
    """
    fields = []
    for name in X:
        fields.append(neuron_labels[name].split('/')[i])
    return fields
def generate_synapse_data(file_path):
    """ Generates precomputed synaptomics labels from Hemibrain data.

    Reads one ``<bodyId>.csv`` synapse table per skeleton from ``file_path``
    and writes a matrix to ``syn_data.npy`` with one row per synapse:
    [presynaptic bodyId, postsynaptic bodyId, x, y, z, confidence] followed
    by one 0/1 indicator column per mesh region.

    # Arguments
        file_path (str): Directory containing the per-skeleton CSV files.
    """
    # Fixed: ``listdir``/``isfile``/``join`` were referenced unqualified,
    # but only ``os`` is imported at module level -> NameError. Use the
    # fully qualified os/os.path functions instead.
    skeleton_ids = [f.split('.')[0] for f in os.listdir(file_path)
                    if os.path.isfile(os.path.join(file_path, f))]
    meshes_to_use = ['bL(L)', 'g4(R)', 'SIP(L)', 'NO1(R)', 'EPA(L)', 'SLP(R)', 'GA(R)', 'aL(R)',
                     'PB(L5)', 'EBr5', 'EB', "b'2(R)", 'FBl9', 'AB(R)', 'PLP(R)', 'GF(R)',
                     'PB(R5)', 'AOTU(R)', 'FBl6', 'CRE(R)', 'FB-column3', "a'L(R)", 'SPS(L)',
                     'PB(L9)', 'AL(L)', 'b1(R)', 'VES(L)', 'ME(R)', 'FBl7', 'EPA(R)', 'EBr2r4',
                     "a'3(R)", 'SAD', 'NO', 'FBl3', 'NO1(L)', 'EBr1', 'WED(R)', 'BU(R)',
                     'SCL(R)', 'VES(R)', 'PB(R8)', 'PB(R9)', "b'1(R)", 'IPS(R)', 'b2(R)',
                     'LOP(R)', 'g5(R)', 'BU(L)', 'FBl1', 'PB(R3)', "b'L(R)", 'MB(R)', 'NO2(L)',
                     'RUB(R)', "a'2(R)", 'PB(L1)', 'PVLP(R)', 'PB(L3)', 'a2(R)', "a'L(L)", 'IB',
                     'mALT(L)', 'PB(L2)', 'aL(L)', 'NO2(R)', "b'L(L)", 'SMP(R)', 'ICL(R)',
                     'AL-DC3(R)', 'PB(R1)', 'gL(L)', 'g1(R)', 'NO3(L)', 'AB(L)', 'gL(R)',
                     'PB(L8)', 'PRW', 'AVLP(R)', 'PB(R6)', 'FB', 'PB(L7)', 'FBl5', 'dACA(R)',
                     'RUB(L)', 'g3(R)', 'ROB(R)', 'NO3(R)', 'AMMC', "a'1(R)", 'bL(R)', 'CA(L)',
                     'PB(L4)', 'PB', 'PB(R4)', 'SAD(-AMMC)', 'AME(R)', 'SCL(L)', 'LAL(R)',
                     'PB(L6)', 'vACA(R)', 'EBr3am', 'NO(R)', 'LO(R)', 'LAL(-GA)(R)', 'FBl4',
                     'a1(R)', 'FBl2', 'ICL(L)', 'EBr6', 'AOT(R)', 'g2(R)', 'EBr3d', 'CRE(L)',
                     'FLA(R)', 'POC', 'GOR(L)', 'MB(L)', 'ATL(R)', 'CAN(R)', 'LAL(L)', 'LH(R)',
                     'SIP(R)', 'GNG', 'SMP(L)', 'EBr3pw', 'a3(R)', 'SPS(R)', 'PED(R)',
                     'PB(R7)', 'AL(R)', 'PB(R2)', 'NO(L)', 'GC', 'CA(R)', 'GOR(R)', 'ATL(L)']
    all_datas = []
    # NOTE(review): the original loop starts at index 1 and therefore skips
    # the first skeleton; preserved here — confirm whether intentional.
    for idx in range(1, len(skeleton_ids)):
        bodyID = str(skeleton_ids[idx])
        data = pd.read_csv(file_path + '/{}.csv'.format(bodyID))
        for j in range(len(data)):
            main_data = np.zeros((6,))
            regions = np.zeros((len(meshes_to_use),))
            main_data[0] = int(data['m.bodyId'][j])
            main_data[1] = int(data['neuron.bodyId'][j])
            # Parse the stringified synapse dict (slow: one literal per row).
            syn = ast.literal_eval(data['syn'][j])
            main_data[2] = syn['location']['coordinates'][0]
            main_data[3] = syn['location']['coordinates'][1]
            main_data[4] = syn['location']['coordinates'][2]
            main_data[5] = syn['confidence']
            # Any extra key naming a known mesh marks region membership.
            # (Renamed from ``i``, which shadowed the outer loop index.)
            for key in syn.keys():
                if key in meshes_to_use:
                    regions[meshes_to_use.index(key)] = 1.
            all_datas.append(np.hstack((main_data, regions)))
    all_datas2 = np.array(all_datas)
    print('Synapse Data Shape:', all_datas2.shape)
    np.save('syn_data.npy', all_datas2)
def intersection(a, b):
    """ Compute the intersection of two lists, preserving the order of ``a``.
    # Arguments
        a (list): First list.
        b (list): Second list.
    # Returns
        list: Elements of ``a`` that also appear in ``b``.
    """
    return [item for item in a if item in b]
class HemibrainAnalysis:
    """A class for analyzing a given database with some helper functions.

    Default files are provided for the Hemibrain dataset.
    """

    def __init__(self):
        """Loads the neuron/synapse tables and builds the
        skeleton-id <-> uname lookup dictionaries (right side only)."""
        self.N = pd.read_csv('traced-neurons.csv')
        self.B = pd.read_csv('synapse_list.csv')
        self.N_list = list(self.N['bodyId'])
        self.T = pd.read_csv('all_neurons_reference_fib.csv')
        self.T = self.T[self.T['side'] == 'right']
        self.T = self.T.reset_index(drop=True)
        self.keys_to_neurons = {}
        self.neurons_to_keys = {}
        for i in range(len(self.T)):
            row = self.T.loc[i]
            self.keys_to_neurons[row['skeleton_id']] = row['uname']
            self.neurons_to_keys[row['uname']] = row['skeleton_id']

    def _partners(self, _id, source_col, target_col, N):
        """Shared partner lookup (deduplicates the formerly near-identical
        pre-/post-synaptic query code).
        # Arguments:
            _id : skeleton id to look up.
            source_col (str): column that must match the neuron's uname.
            target_col (str): column whose values are returned.
            N (int): synapse threshold.
        # Returns:
            list: skeleton ids of the partners present in both tables.
        """
        if _id not in self.keys_to_neurons:
            print('ID {} is missing.'.format(str(_id)))
            return []
        uname = self.keys_to_neurons[_id]
        rows = self.B[(self.B['N'] >= N) & (self.B[source_col] == uname)]
        return [self.neurons_to_keys[u] for u in rows[target_col]
                if u in self.neurons_to_keys]

    def get_postsynaptic_partners(self, _id, N=0):
        """Get postsynaptic partners of a given neuron.
        # Arguments:
            _id : ID to use.
            N: Synapse threshold to use.
        # Returns:
            list: List of postsynaptic partners.
        """
        return self._partners(_id, 'presynaptic', 'postsynaptic', N)

    def get_all_postsynaptic_partners(self, ids, N=0):
        """Gets postsynaptic partners of a given set of neurons.
        # Arguments:
            ids (list): list of IDs to use.
            N: Synapse threshold to use.
        # Returns:
            list: List of postsynaptic partners.
        """
        outs = []
        for i in ids:
            outs += self.get_postsynaptic_partners(i, N=N)
        return outs

    def get_presynaptic_partners(self, _id, N=0):
        """Get presynaptic partners of a given neuron.
        # Arguments:
            _id : ID to use.
            N: Synapse threshold to use.
        # Returns:
            list: List of presynaptic partners.
        """
        return self._partners(_id, 'postsynaptic', 'presynaptic', N)

    def get_all_presynaptic_partners(self, ids, N=0):
        """Gets presynaptic partners of a given set of neurons.
        # Arguments:
            ids (list): list of IDs to use.
            N: Synapse threshold to use.
        # Returns:
            list: List of presynaptic partners.
        """
        outs = []
        for i in ids:
            outs += self.get_presynaptic_partners(i, N=N)
        return outs

    def get_graph(self, _id):
        """Retrieve a graph consisting of only neurons in a list.
        # Arguments:
            _id (list): list of unames to use.
        # Returns:
            pandas DataFrame: Edges whose targets AND sources are in the list.
        """
        results = self.B[self.B['postsynaptic'].isin(_id)]
        results = results[results['presynaptic'].isin(_id)]
        return results

    def get_type(self, _type, N=None):
        """Retrieve neurons whose type contains a specific substring.
        # Arguments:
            _type (str): type to search.
            N (pandas DataFrame): optional table to search instead of self.N.
        # Returns:
            pandas DataFrame: DataFrame with neurons for the query.
        """
        table = self.N if N is None else N
        return table[table['type'].str.contains(_type, na=False)]

    def get_instance(self, instance, N=None, converter=None):
        """Retrieve neurons whose instance contains a specific substring.
        # Arguments:
            instance (str): instance to search.
            N (pandas DataFrame): optional table to search instead of self.N.
            converter (callable): optional post-processing of the result.
        # Returns:
            pandas DataFrame: DataFrame (or converted result) for the query.
        """
        table = self.N if N is None else N
        results = table[table['instance'].str.contains(instance, na=False)]
        if converter is not None:
            results = converter(results)
        return results

    def to_id(self, x):
        """Converts body IDs to integers."""
        return [int(i) for i in x['bodyId']]

    def to_str_id(self, x):
        """Converts body IDs to strings."""
        return [str(i) for i in x['bodyId']]
def load_hemibrain_synaptome():
    """Loads the precomputed hemibrain synaptome from the working directory.
    # Returns:
        numpy array: A matrix in the NeuroSynapsis matrix format.
        dict: The hemibrain volume object loaded from hemibrain_volumes.npy.
    """
    synapses = np.load('syn_data.npy')
    volumes = np.load('hemibrain_volumes.npy', allow_pickle=True).item()
    return synapses, volumes
class Synaptome:
    """Container bundling a synapse matrix with optional metadata."""

    def __init__(self, X, regions=None, synapse_classes=None, confidence=True):
        """Initialize a synaptome object.
        # Arguments:
            X (numpy array): A matrix in the NeuroSynapsis matrix format.
            regions (list): Optional region names for the region columns.
            synapse_classes (list): Optional synapse class labels.
            confidence (bool): Whether confidence values exist for the synapses. Optional.
        """
        self.X = X
        self.regions = regions
        # Fixed: synapse_classes was accepted but silently discarded.
        self.synapse_classes = synapse_classes
        self.confidence = confidence
def find_postsynaptic(X, i):
    """Return the synapses whose postsynaptic neuron id (column 1) equals ``i``.
    # Arguments:
        X (numpy array): A matrix in the NeuroSynapsis matrix format.
        i (int): Numeric ID of the neuron.
    # Returns:
        numpy array: A matrix in the NeuroSynapsis matrix format.
    """
    selected_rows = np.nonzero(X[:, 1] == i)[0]
    return X[selected_rows, :]
def find_presynaptic(X, i):
    """Return the synapses whose presynaptic neuron id (column 0) equals ``i``.
    # Arguments:
        X (numpy array): A matrix in the NeuroSynapsis matrix format.
        i (int): Numeric ID of the neuron.
    # Returns:
        numpy array: A matrix in the NeuroSynapsis matrix format.
    """
    selected_rows = np.nonzero(X[:, 0] == i)[0]
    return X[selected_rows, :]
def filter_by_maximum(X, region, confidence=True):
    """Keep synapses whose (x, y, z) position is <= ``region`` component-wise.
    # Arguments:
        X (numpy array): A matrix in the NeuroSynapsis matrix format.
        region (sequence): Upper bounds (x_max, y_max, z_max).
        confidence (bool): Unused; kept for interface compatibility.
    # Returns:
        numpy array: A matrix in the NeuroSynapsis matrix format.
    """
    # Fixed: the body referenced an undefined name ``i`` instead of the
    # ``region`` parameter, raising NameError on every call.
    keep = (X[:, 2] <= region[0]) * (X[:, 3] <= region[1]) * (X[:, 4] <= region[2])
    return X[np.where(keep)[0], :]
def filter_by_minimum(X, region):
    """Keep synapses whose (x, y, z) position is >= ``region`` component-wise.
    # Arguments:
        X (numpy array): A matrix in the NeuroSynapsis matrix format.
        region (sequence): Lower bounds (x_min, y_min, z_min).
    # Returns:
        numpy array: A matrix in the NeuroSynapsis matrix format.
    """
    # Fixed: the body referenced an undefined name ``i`` instead of the
    # ``region`` parameter, raising NameError on every call.
    keep = (X[:, 2] >= region[0]) * (X[:, 3] >= region[1]) * (X[:, 4] >= region[2])
    return X[np.where(keep)[0], :]
def filter_by_region(X, region, confidence=True):
    """Filter synapses by region.
    # Arguments:
        X (numpy array): A matrix in the NeuroSynapsis matrix format.
        region (str): Name of the region to use.
        confidence (bool): True when the matrix has a confidence column
            before the region indicator columns (shifts the offset by one).
    # Returns:
        numpy array: The filtered matrix, or None (with a message printed)
        when the region name is not recognized.
    """
    regions = ['bL(L)', 'g4(R)', 'SIP(L)', 'NO1(R)', 'EPA(L)', 'SLP(R)', 'GA(R)', 'aL(R)',
               'PB(L5)', 'EBr5', 'EB', "b'2(R)", 'FBl9', 'AB(R)', 'PLP(R)', 'GF(R)',
               'PB(R5)', 'AOTU(R)', 'FBl6', 'CRE(R)', 'FB-column3', "a'L(R)", 'SPS(L)',
               'PB(L9)', 'AL(L)', 'b1(R)', 'VES(L)', 'ME(R)', 'FBl7', 'EPA(R)', 'EBr2r4',
               "a'3(R)", 'SAD', 'NO', 'FBl3', 'NO1(L)', 'EBr1', 'WED(R)', 'BU(R)',
               'SCL(R)', 'VES(R)', 'PB(R8)', 'PB(R9)', "b'1(R)", 'IPS(R)', 'b2(R)',
               'LOP(R)', 'g5(R)', 'BU(L)', 'FBl1', 'PB(R3)', "b'L(R)", 'MB(R)', 'NO2(L)',
               'RUB(R)', "a'2(R)", 'PB(L1)', 'PVLP(R)', 'PB(L3)', 'a2(R)', "a'L(L)", 'IB',
               'mALT(L)', 'PB(L2)', 'aL(L)', 'NO2(R)', "b'L(L)", 'SMP(R)', 'ICL(R)',
               'AL-DC3(R)', 'PB(R1)', 'gL(L)', 'g1(R)', 'NO3(L)', 'AB(L)', 'gL(R)',
               'PB(L8)', 'PRW', 'AVLP(R)', 'PB(R6)', 'FB', 'PB(L7)', 'FBl5', 'dACA(R)',
               'RUB(L)', 'g3(R)', 'ROB(R)', 'NO3(R)', 'AMMC', "a'1(R)", 'bL(R)', 'CA(L)',
               'PB(L4)', 'PB', 'PB(R4)', 'SAD(-AMMC)', 'AME(R)', 'SCL(L)', 'LAL(R)',
               'PB(L6)', 'vACA(R)', 'EBr3am', 'NO(R)', 'LO(R)', 'LAL(-GA)(R)', 'FBl4',
               'a1(R)', 'FBl2', 'ICL(L)', 'EBr6', 'AOT(R)', 'g2(R)', 'EBr3d', 'CRE(L)',
               'FLA(R)', 'POC', 'GOR(L)', 'MB(L)', 'ATL(R)', 'CAN(R)', 'LAL(L)', 'LH(R)',
               'SIP(R)', 'GNG', 'SMP(L)', 'EBr3pw', 'a3(R)', 'SPS(R)', 'PED(R)',
               'PB(R7)', 'AL(R)', 'PB(R2)', 'NO(L)', 'GC', 'CA(R)', 'GOR(R)', 'ATL(L)']
    if region in regions:
        # Column layout: 2 ids + 3 coordinates (+ optional confidence),
        # then one 0/1 indicator column per region.
        k = regions.index(region) + 5 + confidence
        # Fixed: ``k`` was previously computed and then ignored while the
        # identical expression was re-evaluated inline below.
        vals = np.where(X[:, k] == 1.)[0]
        return X[vals, :]
    else:
        print('Region not recognized.')
def elbow_kmeans_optimizer(X, k=None, kmin=1, kmax=5, visualize=True):
    """k-means clustering with or without automatically determined cluster numbers.
    Reference: https://pyclustering.github.io/docs/0.8.2/html/d3/d70/classpyclustering_1_1cluster_1_1elbow_1_1elbow.html
    # Arguments:
        X (numpy array-like): Input data matrix.
        k: Fixed number of clusters; when None the elbow method picks it.
        kmin: Minimum number of clusters to consider. Defaults to 1.
        kmax: Maximum number of clusters to consider. Defaults to 5.
        visualize: Whether to perform k-means visualization or not.
    # Returns:
        numpy arraylike: Clusters.
        numpy arraylike: Cluster centers.
    """
    # Only the imports actually used are kept (third-party: pyclustering).
    from pyclustering.cluster.kmeans import kmeans, kmeans_visualizer
    from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
    from pyclustering.cluster.elbow import elbow
    if k is not None:
        amount_clusters = k
    else:
        # Elbow method: pick the cluster count within [kmin, kmax].
        elbow_instance = elbow(X, kmin, kmax)
        elbow_instance.process()
        amount_clusters = elbow_instance.get_amount()
    centers = kmeans_plusplus_initializer(X, amount_clusters).initialize()
    kmeans_instance = kmeans(X, centers)
    kmeans_instance.process()
    clusters = kmeans_instance.get_clusters()
    centers = kmeans_instance.get_centers()
    # Fixed: visualization previously ran unconditionally, ignoring the
    # ``visualize`` flag (default behavior is unchanged).
    if visualize:
        kmeans_visualizer.show_clusters(X, clusters, centers)
    return clusters, centers
def return_synapse_positions(X):
"""Filters a NeuroSynapsis matrix and returns only the matrix of synapse positions.
# Arguments:
X (numpy array): A matrix in the NeuroSynapsis matrix format.
# Returns:
numpy array: A matrix of size samples x dimensions.
"""
return X[:,2:5]
def calculate_synapse_density(A, B):
"""Filters a NeuroSynapsis matrix and returns only the matrix of synapse positions.
# Arguments:
A (numpy array): A matrix in the NeuroSynapsis matrix format.
B (dict): A NeuroSynapsis volume object.
# Returns:
dict: Density of synapses, calculated as (# synapses)/(nm^3).
"""
densities = {}
for i, v in enumerate(list(B.keys())):
X = filter_by_region(A, v)
densities[v] = X.shape[0] / (B[v]*8*8*8)
return densities | [
"os.path.abspath",
"numpy.save",
"numpy.load",
"ast.literal_eval",
"pyclustering.cluster.center_initializer.kmeans_plusplus_initializer",
"pandas.read_csv",
"numpy.zeros",
"pyclustering.cluster.kmeans.kmeans",
"numpy.hstack",
"pyclustering.cluster.elbow.elbow",
"numpy.where",
"numpy.array",
... | [((118, 143), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (133, 143), False, 'import os\n'), ((2034, 2074), 'networkx.read_gexf', 'nx.read_gexf', (['"""hemi12_G_normalized.gexf"""'], {}), "('hemi12_G_normalized.gexf')\n", (2046, 2074), True, 'import networkx as nx\n'), ((9484, 9503), 'numpy.array', 'np.array', (['all_datas'], {}), '(all_datas)\n', (9492, 9503), True, 'import numpy as np\n'), ((9559, 9594), 'numpy.save', 'np.save', (['"""syn_data.npy"""', 'all_datas2'], {}), "('syn_data.npy', all_datas2)\n", (9566, 9594), True, 'import numpy as np\n'), ((22637, 22655), 'pyclustering.cluster.kmeans.kmeans', 'kmeans', (['X', 'centers'], {}), '(X, centers)\n', (22643, 22655), False, 'from pyclustering.cluster.kmeans import kmeans\n'), ((22780, 22833), 'pyclustering.cluster.kmeans.kmeans_visualizer.show_clusters', 'kmeans_visualizer.show_clusters', (['X', 'clusters', 'centers'], {}), '(X, clusters, centers)\n', (22811, 22833), False, 'from pyclustering.cluster.kmeans import kmeans_visualizer\n'), ((8475, 8489), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (8483, 8489), True, 'import numpy as np\n'), ((10143, 10176), 'pandas.read_csv', 'pd.read_csv', (['"""traced-neurons.csv"""'], {}), "('traced-neurons.csv')\n", (10154, 10176), True, 'import pandas as pd\n'), ((10194, 10225), 'pandas.read_csv', 'pd.read_csv', (['"""synapse_list.csv"""'], {}), "('synapse_list.csv')\n", (10205, 10225), True, 'import pandas as pd\n'), ((10288, 10332), 'pandas.read_csv', 'pd.read_csv', (['"""all_neurons_reference_fib.csv"""'], {}), "('all_neurons_reference_fib.csv')\n", (10299, 10332), True, 'import pandas as pd\n'), ((14929, 14952), 'numpy.load', 'np.load', (['"""syn_data.npy"""'], {}), "('syn_data.npy')\n", (14936, 14952), True, 'import numpy as np\n'), ((15819, 15841), 'numpy.where', 'np.where', (['(X[:, 1] == i)'], {}), '(X[:, 1] == i)\n', (15827, 15841), True, 'import numpy as np\n'), ((16186, 16208), 'numpy.where', 'np.where', (['(X[:, 0] 
== i)'], {}), '(X[:, 0] == i)\n', (16194, 16208), True, 'import numpy as np\n'), ((16522, 16589), 'numpy.where', 'np.where', (['((X[:, 2] <= i[0]) * (X[:, 3] <= i[1]) * (X[:, 4] <= i[2]))'], {}), '((X[:, 2] <= i[0]) * (X[:, 3] <= i[1]) * (X[:, 4] <= i[2]))\n', (16530, 16589), True, 'import numpy as np\n'), ((16878, 16945), 'numpy.where', 'np.where', (['((X[:, 2] >= i[0]) * (X[:, 3] >= i[1]) * (X[:, 4] >= i[2]))'], {}), '((X[:, 2] >= i[0]) * (X[:, 3] >= i[1]) * (X[:, 4] >= i[2]))\n', (16886, 16945), True, 'import numpy as np\n'), ((22392, 22412), 'pyclustering.cluster.elbow.elbow', 'elbow', (['X', 'kmin', 'kmax'], {}), '(X, kmin, kmax)\n', (22397, 22412), False, 'from pyclustering.cluster.elbow import elbow\n'), ((2095, 2141), 'numpy.load', 'np.load', (['"""hemi_neurons.npy"""'], {'allow_pickle': '(True)'}), "('hemi_neurons.npy', allow_pickle=True)\n", (2102, 2141), True, 'import numpy as np\n'), ((8608, 8622), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (8616, 8622), True, 'import numpy as np\n'), ((8817, 8849), 'ast.literal_eval', 'ast.literal_eval', (["data['syn'][j]"], {}), "(data['syn'][j])\n", (8833, 8849), False, 'import ast\n'), ((9395, 9426), 'numpy.hstack', 'np.hstack', (['(main_data, regions)'], {}), '((main_data, regions))\n', (9404, 9426), True, 'import numpy as np\n'), ((22554, 22601), 'pyclustering.cluster.center_initializer.kmeans_plusplus_initializer', 'kmeans_plusplus_initializer', (['X', 'amount_clusters'], {}), '(X, amount_clusters)\n', (22581, 22601), False, 'from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer, random_center_initializer\n'), ((14954, 15005), 'numpy.load', 'np.load', (['"""hemibrain_volumes.npy"""'], {'allow_pickle': '(True)'}), "('hemibrain_volumes.npy', allow_pickle=True)\n", (14961, 15005), True, 'import numpy as np\n')] |
import bayesiancoresets as bc
import numpy as np
import warnings
warnings.filterwarnings('ignore', category=UserWarning) #tests will generate warnings (due to pathological data design for testing), just ignore them
np.seterr(all='raise')
np.set_printoptions(linewidth=500)
np.random.seed(100)
Nsamps=10000000
#linear test function/grad, sampling fcn for th and corresponding expectation gram matrix
def sample_linear(N, D):
return np.random.randn(N, D)
def ll_linear(x, th):
return (x.dot(th[:,np.newaxis])).flatten()
def gll_linear(x, th, idx=None):
if idx is None:
return x
return x[:, idx]
def gram2_linear(x):
return x.dot(x.T)
def gramF_linear(x):
return x.dot(x.T)
#quadratic test function/grad, sampling fcn for th and corresponding expectation gram matrix
def sample_quad(N, D):
return np.random.randn(N, D)
def ll_quad(x, th):
return (0.5*(x.dot(th[:,np.newaxis]))**2).flatten()
def gll_quad(x, th, idx=None):
if idx is None:
return x.dot(th[:,np.newaxis])*x
return x.dot(th[:,np.newaxis]).T * x[:,idx]
def gram2_quad(x):
#init gram matrix
grm = np.zeros((x.shape[0], x.shape[0]))
irng = range(x.shape[1])
idxqs = [(i,j,k,l) for i in irng for j in irng for k in irng for l in irng]
#loop over all pairs of data
for m in range(x.shape[0]):
for n in range(x.shape[0]):
#loop over all index quartets
for i, j, k, l in idxqs:
idcs = np.array([i,j,k,l])
unq = np.unique(idcs)
nunq = unq.shape[0]
if nunq == 3 or nunq == 4:
continue
if nunq == 1:
grm[m,n] += 0.25*3*x[m,i]**2*x[n,i]**2
continue
#nunq == 2
if (idcs == unq[0]).sum() == 3 or (idcs == unq[1]).sum() == 3:
continue
#2 groups of 2
grm[m,n] += 0.25*x[m,i]*x[n,j]*x[m,k]*x[n,l]
return grm
def gramF_quad(x):
return (x.dot(x.T))**2
#evaluate the testing ll / gll functions to make sure there's no error in the tests themselves
def single_llgll(ll, gll, g2, gF, samp):
th = np.random.randn(2)
x = np.random.randn(3, 2)
#compute exact gradient
exact_grad = gll(x, th)
#make sure it has the right shape and the component evals are equal
assert exact_grad.shape == (3, 2), "error: grad has incorrect shape"
for i in range(2):
assert np.all(exact_grad[:,i] == gll(x, th, i)), "error: grad().component != grad(component)"
#compare the numerical grad
num_grad = np.zeros((3,2))
eps = 1e-9
for i in range(2):
thr = th.copy()
thr[i] += eps
thl = th.copy()
thl[i] -= eps
num_grad[:, i] = (ll(x, thr) - ll(x, thl))/(2*eps)
assert np.all(np.fabs(num_grad - exact_grad) < 1e-6), "error: numerical/exact gradients do not match up; max diff = " + str(np.fabs(num_grad-exact_grad).max())
#make sure the exact expected gram matrices are close to numerical values
exact_gram2 = g2(x)
exact_gramF = gF(x)
ths = samp(Nsamps, 2)
num_gram2 = np.zeros_like(exact_gram2)
num_gramF = np.zeros_like(exact_gramF)
for i in range(Nsamps):
lls = ll(x, ths[i, :])
glls = gll(x, ths[i, :])
num_gram2 += lls[:, np.newaxis]*lls
num_gramF += glls.dot(glls.T)
num_gram2 /= Nsamps
num_gramF /= Nsamps
assert np.all(np.fabs(num_gramF - exact_gramF) < 5e-2), "error: numerical/exact gramF matrices don't match up; max diff = " + str(np.fabs(num_gramF-exact_gramF).max())
assert np.all(np.fabs(num_gram2 - exact_gram2) < 5e-2), "error: numerical/exact gram2 matrices don't match up; max diff = " + str(np.fabs(num_gram2-exact_gram2).max())
def test_llgll():
for ll, gll, g2, gF, samp in [(ll_linear, gll_linear, gram2_linear, gramF_linear, sample_linear), (ll_quad, gll_quad, gram2_quad, gramF_quad, sample_quad)]:
yield single_llgll, ll, gll, g2, gF, samp
#test if the F projection converges to the expectation
def single_projF(gll, gram, samp):
x = samp(3, 2)
proj = bc.ProjectionF(x, gll, Nsamps, lambda : samp(1, 2).flatten())
w = proj.get()
assert np.all(np.fabs(gram(x) - w.dot(w.T)) < 1e-2), "error: projectionF doesn't converge to expectation; max diff = " + str(np.fabs(gram(x) - w.dot(w.T)).max())
proj.reset()
assert proj.get().shape == w.shape, "error: proj.reset() doesn't retain shape"
is_constant = True
gtest = gll(x, samp(1, 2).flatten())
for i in range(10):
gtest2 = gll(x, samp(1,2).flatten())
if np.any(gtest2 != gtest):
is_constant = False
if not is_constant:
assert np.all(np.fabs(w - proj.get()) > 0), "error: proj.reset() doesn't refresh entries"
proj.reset(5)
assert proj.get().shape[1] == 5, "error: proj.reset(5) doesn't create a new projection with 5 components"
#test if 2 projection converges to its expectation
def single_proj2(ll, gram, samp):
x = samp(3, 2)
proj = bc.Projection2(x, ll, Nsamps, lambda : samp(1, 2).flatten())
w = proj.get()
assert np.all(np.fabs(gram(x) - w.dot(w.T)) < 1e-2), "error: projection2 doesn't converge to expectation; max diff = " + str(np.fabs(gram(x) - w.dot(w.T)).max())
proj.reset()
assert proj.get().shape == w.shape, "error: proj.reset() doesn't retain shape"
is_constant = True
ltest = ll(x, samp(1, 2).flatten())
for i in range(10):
ltest2 = ll(x, samp(1,2).flatten())
if np.any(ltest2 != ltest):
is_constant = False
if not is_constant:
assert np.all(np.fabs(w - proj.get()) > 0), "error: proj.reset() doesn't refresh entries"
proj.reset(5)
assert proj.get().shape[1] == 5, "error: proj.reset(5) doesn't create a new projection with 5 components"
def test_proj():
for ll, gll, g2, gF, samp in [(ll_linear, gll_linear, gram2_linear, gramF_linear, sample_linear), (ll_quad, gll_quad, gram2_quad, gramF_quad, sample_quad)]:
yield single_projF, gll, gF, samp
yield single_proj2, ll, g2, samp
| [
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.seterr",
"warnings.filterwarnings",
"numpy.zeros",
"numpy.any",
"numpy.fabs",
"numpy.array",
"numpy.unique"
] | [((67, 122), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (90, 122), False, 'import warnings\n'), ((217, 239), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (226, 239), True, 'import numpy as np\n'), ((240, 274), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(500)'}), '(linewidth=500)\n', (259, 274), True, 'import numpy as np\n'), ((275, 294), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (289, 294), True, 'import numpy as np\n'), ((436, 457), 'numpy.random.randn', 'np.random.randn', (['N', 'D'], {}), '(N, D)\n', (451, 457), True, 'import numpy as np\n'), ((822, 843), 'numpy.random.randn', 'np.random.randn', (['N', 'D'], {}), '(N, D)\n', (837, 843), True, 'import numpy as np\n'), ((1100, 1134), 'numpy.zeros', 'np.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (1108, 1134), True, 'import numpy as np\n'), ((2028, 2046), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (2043, 2046), True, 'import numpy as np\n'), ((2053, 2074), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)'], {}), '(3, 2)\n', (2068, 2074), True, 'import numpy as np\n'), ((2430, 2446), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (2438, 2446), True, 'import numpy as np\n'), ((2931, 2957), 'numpy.zeros_like', 'np.zeros_like', (['exact_gram2'], {}), '(exact_gram2)\n', (2944, 2957), True, 'import numpy as np\n'), ((2972, 2998), 'numpy.zeros_like', 'np.zeros_like', (['exact_gramF'], {}), '(exact_gramF)\n', (2985, 2998), True, 'import numpy as np\n'), ((4354, 4377), 'numpy.any', 'np.any', (['(gtest2 != gtest)'], {}), '(gtest2 != gtest)\n', (4360, 4377), True, 'import numpy as np\n'), ((5226, 5249), 'numpy.any', 'np.any', (['(ltest2 != ltest)'], {}), '(ltest2 != ltest)\n', (5232, 5249), True, 'import numpy as np\n'), ((2627, 2657), 'numpy.fabs', 'np.fabs', (['(num_grad 
- exact_grad)'], {}), '(num_grad - exact_grad)\n', (2634, 2657), True, 'import numpy as np\n'), ((3215, 3247), 'numpy.fabs', 'np.fabs', (['(num_gramF - exact_gramF)'], {}), '(num_gramF - exact_gramF)\n', (3222, 3247), True, 'import numpy as np\n'), ((3385, 3417), 'numpy.fabs', 'np.fabs', (['(num_gram2 - exact_gram2)'], {}), '(num_gram2 - exact_gram2)\n', (3392, 3417), True, 'import numpy as np\n'), ((1415, 1437), 'numpy.array', 'np.array', (['[i, j, k, l]'], {}), '([i, j, k, l])\n', (1423, 1437), True, 'import numpy as np\n'), ((1449, 1464), 'numpy.unique', 'np.unique', (['idcs'], {}), '(idcs)\n', (1458, 1464), True, 'import numpy as np\n'), ((2737, 2767), 'numpy.fabs', 'np.fabs', (['(num_grad - exact_grad)'], {}), '(num_grad - exact_grad)\n', (2744, 2767), True, 'import numpy as np\n'), ((3331, 3363), 'numpy.fabs', 'np.fabs', (['(num_gramF - exact_gramF)'], {}), '(num_gramF - exact_gramF)\n', (3338, 3363), True, 'import numpy as np\n'), ((3501, 3533), 'numpy.fabs', 'np.fabs', (['(num_gram2 - exact_gram2)'], {}), '(num_gram2 - exact_gram2)\n', (3508, 3533), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
USE_WEBCAM = False # If false, loads video file source
# parameters for loading data and images
emotion_model_path = '/models/emotion_model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)
# loading models
face_cascade = cv2.CascadeClassifier('/models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
# Select video or webcam feed
cap = None
if (USE_WEBCAM == True):
cap = cv2.VideoCapture(0) # Webcam source
else:
cap = cv2.VideoCapture('/demo/dinner.mp4') # Video file source
while cap.isOpened(): # True:
ret, bgr_image = cap.read()
#bgr_image = video_capture.read()[1]
gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
gray_face = cv2.resize(gray_face, (emotion_target_size))
except:
continue
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_prediction = emotion_classifier.predict(gray_face)
emotion_probability = np.max(emotion_prediction)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = emotion_labels[emotion_label_arg]
emotion_window.append(emotion_text)
if len(emotion_window) > frame_window:
emotion_window.pop(0)
try:
emotion_mode = mode(emotion_window)
except:
continue
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
else:
color = emotion_probability * np.asarray((0, 255, 0))
color = color.astype(int)
color = color.tolist()
draw_bounding_box(face_coordinates, rgb_image, color)
draw_text(face_coordinates, rgb_image, emotion_mode,
color, 0, -45, 1, 1)
bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
cv2.imshow('window_frame', bgr_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"numpy.argmax",
"cv2.imshow",
"cv2.cvtColor",
"numpy.max",
"utils.inference.apply_offsets",
"statistics.mode",
"cv2.destroyAllWindows",
"cv2.resize",
"utils.preprocessor.preprocess_input",
"utils.inference.draw_bounding_box",
"cv2.waitKey",
"numpy.asarray",
"util... | [((561, 582), 'utils.datasets.get_labels', 'get_labels', (['"""fer2013"""'], {}), "('fer2013')\n", (571, 582), False, 'from utils.datasets import get_labels\n'), ((706, 774), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""/models/haarcascade_frontalface_default.xml"""'], {}), "('/models/haarcascade_frontalface_default.xml')\n", (727, 774), False, 'import cv2\n'), ((796, 826), 'keras.models.load_model', 'load_model', (['emotion_model_path'], {}), '(emotion_model_path)\n', (806, 826), False, 'from keras.models import load_model\n'), ((1018, 1049), 'cv2.namedWindow', 'cv2.namedWindow', (['"""window_frame"""'], {}), "('window_frame')\n", (1033, 1049), False, 'import cv2\n'), ((1066, 1085), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1082, 1085), False, 'import cv2\n'), ((3432, 3455), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3453, 3455), False, 'import cv2\n'), ((1163, 1182), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1179, 1182), False, 'import cv2\n'), ((1215, 1251), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""/demo/dinner.mp4"""'], {}), "('/demo/dinner.mp4')\n", (1231, 1251), False, 'import cv2\n'), ((1395, 1438), 'cv2.cvtColor', 'cv2.cvtColor', (['bgr_image', 'cv2.COLOR_BGR2GRAY'], {}), '(bgr_image, cv2.COLOR_BGR2GRAY)\n', (1407, 1438), False, 'import cv2\n'), ((1455, 1497), 'cv2.cvtColor', 'cv2.cvtColor', (['bgr_image', 'cv2.COLOR_BGR2RGB'], {}), '(bgr_image, cv2.COLOR_BGR2RGB)\n', (1467, 1497), False, 'import cv2\n'), ((3276, 3318), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb_image', 'cv2.COLOR_RGB2BGR'], {}), '(rgb_image, cv2.COLOR_RGB2BGR)\n', (3288, 3318), False, 'import cv2\n'), ((3323, 3360), 'cv2.imshow', 'cv2.imshow', (['"""window_frame"""', 'bgr_image'], {}), "('window_frame', bgr_image)\n", (3333, 3360), False, 'import cv2\n'), ((1700, 1748), 'utils.inference.apply_offsets', 'apply_offsets', (['face_coordinates', 'emotion_offsets'], {}), '(face_coordinates, 
emotion_offsets)\n', (1713, 1748), False, 'from utils.inference import apply_offsets\n'), ((1934, 1967), 'utils.preprocessor.preprocess_input', 'preprocess_input', (['gray_face', '(True)'], {}), '(gray_face, True)\n', (1950, 1967), False, 'from utils.preprocessor import preprocess_input\n'), ((1988, 2016), 'numpy.expand_dims', 'np.expand_dims', (['gray_face', '(0)'], {}), '(gray_face, 0)\n', (2002, 2016), True, 'import numpy as np\n'), ((2037, 2066), 'numpy.expand_dims', 'np.expand_dims', (['gray_face', '(-1)'], {}), '(gray_face, -1)\n', (2051, 2066), True, 'import numpy as np\n'), ((2164, 2190), 'numpy.max', 'np.max', (['emotion_prediction'], {}), '(emotion_prediction)\n', (2170, 2190), True, 'import numpy as np\n'), ((2219, 2248), 'numpy.argmax', 'np.argmax', (['emotion_prediction'], {}), '(emotion_prediction)\n', (2228, 2248), True, 'import numpy as np\n'), ((3105, 3158), 'utils.inference.draw_bounding_box', 'draw_bounding_box', (['face_coordinates', 'rgb_image', 'color'], {}), '(face_coordinates, rgb_image, color)\n', (3122, 3158), False, 'from utils.inference import draw_bounding_box\n'), ((3167, 3240), 'utils.inference.draw_text', 'draw_text', (['face_coordinates', 'rgb_image', 'emotion_mode', 'color', '(0)', '(-45)', '(1)', '(1)'], {}), '(face_coordinates, rgb_image, emotion_mode, color, 0, -45, 1, 1)\n', (3176, 3240), False, 'from utils.inference import draw_text\n'), ((1831, 1873), 'cv2.resize', 'cv2.resize', (['gray_face', 'emotion_target_size'], {}), '(gray_face, emotion_target_size)\n', (1841, 1873), False, 'import cv2\n'), ((2472, 2492), 'statistics.mode', 'mode', (['emotion_window'], {}), '(emotion_window)\n', (2476, 2492), False, 'from statistics import mode\n'), ((3368, 3382), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3379, 3382), False, 'import cv2\n'), ((2609, 2632), 'numpy.asarray', 'np.asarray', (['(255, 0, 0)'], {}), '((255, 0, 0))\n', (2619, 2632), True, 'import numpy as np\n'), ((2711, 2734), 'numpy.asarray', 'np.asarray', (['(0, 
0, 255)'], {}), '((0, 0, 255))\n', (2721, 2734), True, 'import numpy as np\n'), ((2815, 2840), 'numpy.asarray', 'np.asarray', (['(255, 255, 0)'], {}), '((255, 255, 0))\n', (2825, 2840), True, 'import numpy as np\n'), ((2924, 2949), 'numpy.asarray', 'np.asarray', (['(0, 255, 255)'], {}), '((0, 255, 255))\n', (2934, 2949), True, 'import numpy as np\n'), ((3006, 3029), 'numpy.asarray', 'np.asarray', (['(0, 255, 0)'], {}), '((0, 255, 0))\n', (3016, 3029), True, 'import numpy as np\n')] |
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Test the save_trajs function of the coordinates API by comparing
the direct, sequential retrieval of frames via mdtraj.load_frame() vs
the retrival via save_trajs
@author: gph82, clonker
"""
import unittest
import os
import shutil
import tempfile
import numpy as np
import pyemma.coordinates as coor
from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, \
compare_coords_md_trajectory_objects
from pyemma.coordinates.api import save_trajs
class TestSaveTrajs(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestSaveTrajs, cls).setUpClass()
def setUp(self):
self.eps = 1e-10
path = os.path.join(os.path.split(__file__)[0], 'data')
self.pdbfile = os.path.join(path, 'bpti_ca.pdb')
self.trajfiles = [os.path.join(path, 'bpti_001-033.xtc'),
os.path.join(path, 'bpti_034-066.xtc'),
os.path.join(path, 'bpti_067-100.xtc')
]
# Create random sets of files and frames to be retrieved from trajfiles
n_members_set1 = 10
n_members_set2 = 20
set_1 = np.vstack((np.random.permutation([0, 2] * n_members_set1)[:n_members_set1],
np.random.randint(32, size=n_members_set1))).T
set_2 = np.vstack((np.random.permutation([0, 2] * n_members_set2)[:n_members_set2],
np.random.randint(32, size=n_members_set2))).T
self.sets = [set_1, set_2]
self.subdir = tempfile.mkdtemp(suffix='save_trajs_test/')
# Instantiate the reader
self.reader = coor.source(self.trajfiles, top=self.pdbfile)
self.reader.chunksize = 30
self.n_pass_files = [self.subdir + 'n_pass.set_%06u.xtc' % ii for ii in xrange(len(self.sets))]
self.one_pass_files = [self.subdir + '1_pass.set_%06u.xtc' % ii for ii in xrange(len(self.sets))]
self.traj_ref = save_traj_w_md_load_frame(self.reader, self.sets)
self.strides = [2, 3, 5]
def tearDown(self):
shutil.rmtree(self.subdir, ignore_errors=True)
def test_save_SaveTrajs_IO(self):
# Test that we're saving to disk alright
flist = save_trajs(self.reader, self.sets, prefix=self.subdir)
exist = True
for f in flist:
exist = exist and os.stat(f)
self.assertTrue(exist, "Could not write to disk")
def test_save_SaveTrajs_multipass(self):
# Without the "inmemory" option, i.e. multipass
__ = save_trajs(self.reader, self.sets,
outfiles=self.n_pass_files)
# Reload the object to memory
traj_n_pass = single_traj_from_n_files(self.n_pass_files, top=self.pdbfile)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_n_pass, self.traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_save_SaveTrajs_onepass(self):
# With the inmemory option = True
__ = save_trajs(self.reader, self.sets,
outfiles=self.one_pass_files, inmemory=True)
traj_1_pass = single_traj_from_n_files(self.one_pass_files, top=self.pdbfile)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_1_pass, self.traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_save_SaveTrajs_onepass_with_stride(self):
# With the inmemory option = True
for stride in self.strides[:]:
# Since none of the trajfiles have more than 30 frames, the frames have to be re-drawn for every stride
sets = np.copy(self.sets)
sets[0][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[0])[0])
sets[1][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[1])[0])
__ = save_trajs(self.reader, sets,
outfiles=self.one_pass_files, inmemory=True, stride=stride)
traj_1_pass = single_traj_from_n_files(self.one_pass_files, top=self.pdbfile)
# Also the reference has to be re-drawn using the stride. For this, we use the re-scale the strided
# frame-indexes to the unstrided value
sets[0][:, 1] *= stride
sets[1][:, 1] *= stride
traj_ref = save_traj_w_md_load_frame(self.reader, sets)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_1_pass, traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
def test_save_SaveTrajs_multipass_with_stride(self):
# With the inmemory option = True
for stride in self.strides[:]:
# Since none of the trajfiles have more than 30 frames, the frames have to be re-drawn for every stride
sets = np.copy(self.sets)
sets[0][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[0])[0])
sets[1][:, 1] = np.random.randint(0, high=30 / stride, size=np.shape(sets[1])[0])
__ = save_trajs(self.reader, sets,
outfiles=self.one_pass_files, inmemory=False, stride=stride)
traj_1_pass = single_traj_from_n_files(self.one_pass_files, top=self.pdbfile)
# Also the reference has to be re-drawn using the stride. For this, we use the re-scale the strided
# frame-indexes to the unstrided value
sets[0][:, 1] *= stride
sets[1][:, 1] *= stride
traj_ref = save_traj_w_md_load_frame(self.reader, sets)
# Check for diffs
(found_diff, errmsg) = compare_coords_md_trajectory_objects(traj_1_pass, traj_ref, atom=0)
self.assertFalse(found_diff, errmsg)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"pyemma.coordinates.data.util.reader_utils.compare_coords_md_trajectory_objects",
"numpy.copy",
"os.stat",
"pyemma.coordinates.data.util.reader_utils.save_traj_w_md_load_frame",
"pyemma.coordinates.source",
"pyemma.coordinates.api.save_trajs",
"numpy.shape",
"tempfile.mkdtemp",
"n... | [((7244, 7259), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7257, 7259), False, 'import unittest\n'), ((2181, 2214), 'os.path.join', 'os.path.join', (['path', '"""bpti_ca.pdb"""'], {}), "(path, 'bpti_ca.pdb')\n", (2193, 2214), False, 'import os\n'), ((2969, 3012), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""save_trajs_test/"""'}), "(suffix='save_trajs_test/')\n", (2985, 3012), False, 'import tempfile\n'), ((3069, 3114), 'pyemma.coordinates.source', 'coor.source', (['self.trajfiles'], {'top': 'self.pdbfile'}), '(self.trajfiles, top=self.pdbfile)\n', (3080, 3114), True, 'import pyemma.coordinates as coor\n'), ((3385, 3434), 'pyemma.coordinates.data.util.reader_utils.save_traj_w_md_load_frame', 'save_traj_w_md_load_frame', (['self.reader', 'self.sets'], {}), '(self.reader, self.sets)\n', (3410, 3434), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((3501, 3547), 'shutil.rmtree', 'shutil.rmtree', (['self.subdir'], {'ignore_errors': '(True)'}), '(self.subdir, ignore_errors=True)\n', (3514, 3547), False, 'import shutil\n'), ((3652, 3706), 'pyemma.coordinates.api.save_trajs', 'save_trajs', (['self.reader', 'self.sets'], {'prefix': 'self.subdir'}), '(self.reader, self.sets, prefix=self.subdir)\n', (3662, 3706), False, 'from pyemma.coordinates.api import save_trajs\n'), ((3967, 4029), 'pyemma.coordinates.api.save_trajs', 'save_trajs', (['self.reader', 'self.sets'], {'outfiles': 'self.n_pass_files'}), '(self.reader, self.sets, outfiles=self.n_pass_files)\n', (3977, 4029), False, 'from pyemma.coordinates.api import save_trajs\n'), ((4115, 4176), 'pyemma.coordinates.data.util.reader_utils.single_traj_from_n_files', 'single_traj_from_n_files', (['self.n_pass_files'], {'top': 'self.pdbfile'}), '(self.n_pass_files, top=self.pdbfile)\n', (4139, 4176), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, 
save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((4235, 4307), 'pyemma.coordinates.data.util.reader_utils.compare_coords_md_trajectory_objects', 'compare_coords_md_trajectory_objects', (['traj_n_pass', 'self.traj_ref'], {'atom': '(0)'}), '(traj_n_pass, self.traj_ref, atom=0)\n', (4271, 4307), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((4454, 4533), 'pyemma.coordinates.api.save_trajs', 'save_trajs', (['self.reader', 'self.sets'], {'outfiles': 'self.one_pass_files', 'inmemory': '(True)'}), '(self.reader, self.sets, outfiles=self.one_pass_files, inmemory=True)\n', (4464, 4533), False, 'from pyemma.coordinates.api import save_trajs\n'), ((4581, 4644), 'pyemma.coordinates.data.util.reader_utils.single_traj_from_n_files', 'single_traj_from_n_files', (['self.one_pass_files'], {'top': 'self.pdbfile'}), '(self.one_pass_files, top=self.pdbfile)\n', (4605, 4644), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((4703, 4775), 'pyemma.coordinates.data.util.reader_utils.compare_coords_md_trajectory_objects', 'compare_coords_md_trajectory_objects', (['traj_1_pass', 'self.traj_ref'], {'atom': '(0)'}), '(traj_1_pass, self.traj_ref, atom=0)\n', (4739, 4775), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((2241, 2279), 'os.path.join', 'os.path.join', (['path', '"""bpti_001-033.xtc"""'], {}), "(path, 'bpti_001-033.xtc')\n", (2253, 2279), False, 'import os\n'), ((2307, 2345), 'os.path.join', 'os.path.join', (['path', '"""bpti_034-066.xtc"""'], {}), "(path, 'bpti_034-066.xtc')\n", (2319, 2345), False, 'import os\n'), ((2373, 2411), 'os.path.join', 'os.path.join', (['path', '"""bpti_067-100.xtc"""'], {}), "(path, 'bpti_067-100.xtc')\n", 
(2385, 2411), False, 'import os\n'), ((5094, 5112), 'numpy.copy', 'np.copy', (['self.sets'], {}), '(self.sets)\n', (5101, 5112), True, 'import numpy as np\n'), ((5319, 5412), 'pyemma.coordinates.api.save_trajs', 'save_trajs', (['self.reader', 'sets'], {'outfiles': 'self.one_pass_files', 'inmemory': '(True)', 'stride': 'stride'}), '(self.reader, sets, outfiles=self.one_pass_files, inmemory=True,\n stride=stride)\n', (5329, 5412), False, 'from pyemma.coordinates.api import save_trajs\n'), ((5464, 5527), 'pyemma.coordinates.data.util.reader_utils.single_traj_from_n_files', 'single_traj_from_n_files', (['self.one_pass_files'], {'top': 'self.pdbfile'}), '(self.one_pass_files, top=self.pdbfile)\n', (5488, 5527), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((5787, 5831), 'pyemma.coordinates.data.util.reader_utils.save_traj_w_md_load_frame', 'save_traj_w_md_load_frame', (['self.reader', 'sets'], {}), '(self.reader, sets)\n', (5812, 5831), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((5898, 5965), 'pyemma.coordinates.data.util.reader_utils.compare_coords_md_trajectory_objects', 'compare_coords_md_trajectory_objects', (['traj_1_pass', 'traj_ref'], {'atom': '(0)'}), '(traj_1_pass, traj_ref, atom=0)\n', (5934, 5965), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((6290, 6308), 'numpy.copy', 'np.copy', (['self.sets'], {}), '(self.sets)\n', (6297, 6308), True, 'import numpy as np\n'), ((6515, 6609), 'pyemma.coordinates.api.save_trajs', 'save_trajs', (['self.reader', 'sets'], {'outfiles': 'self.one_pass_files', 'inmemory': '(False)', 'stride': 'stride'}), '(self.reader, sets, outfiles=self.one_pass_files, inmemory=False,\n stride=stride)\n', (6525, 
6609), False, 'from pyemma.coordinates.api import save_trajs\n'), ((6661, 6724), 'pyemma.coordinates.data.util.reader_utils.single_traj_from_n_files', 'single_traj_from_n_files', (['self.one_pass_files'], {'top': 'self.pdbfile'}), '(self.one_pass_files, top=self.pdbfile)\n', (6685, 6724), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((6984, 7028), 'pyemma.coordinates.data.util.reader_utils.save_traj_w_md_load_frame', 'save_traj_w_md_load_frame', (['self.reader', 'sets'], {}), '(self.reader, sets)\n', (7009, 7028), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((7095, 7162), 'pyemma.coordinates.data.util.reader_utils.compare_coords_md_trajectory_objects', 'compare_coords_md_trajectory_objects', (['traj_1_pass', 'traj_ref'], {'atom': '(0)'}), '(traj_1_pass, traj_ref, atom=0)\n', (7131, 7162), False, 'from pyemma.coordinates.data.util.reader_utils import single_traj_from_n_files, save_traj_w_md_load_frame, compare_coords_md_trajectory_objects\n'), ((2122, 2145), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (2135, 2145), False, 'import os\n'), ((3782, 3792), 'os.stat', 'os.stat', (['f'], {}), '(f)\n', (3789, 3792), False, 'import os\n'), ((2696, 2738), 'numpy.random.randint', 'np.random.randint', (['(32)'], {'size': 'n_members_set1'}), '(32, size=n_members_set1)\n', (2713, 2738), True, 'import numpy as np\n'), ((2863, 2905), 'numpy.random.randint', 'np.random.randint', (['(32)'], {'size': 'n_members_set2'}), '(32, size=n_members_set2)\n', (2880, 2905), True, 'import numpy as np\n'), ((2604, 2650), 'numpy.random.permutation', 'np.random.permutation', (['([0, 2] * n_members_set1)'], {}), '([0, 2] * n_members_set1)\n', (2625, 2650), True, 'import numpy as np\n'), ((2771, 2817), 'numpy.random.permutation', 
'np.random.permutation', (['([0, 2] * n_members_set2)'], {}), '([0, 2] * n_members_set2)\n', (2792, 2817), True, 'import numpy as np\n'), ((5185, 5202), 'numpy.shape', 'np.shape', (['sets[0]'], {}), '(sets[0])\n', (5193, 5202), True, 'import numpy as np\n'), ((5279, 5296), 'numpy.shape', 'np.shape', (['sets[1]'], {}), '(sets[1])\n', (5287, 5296), True, 'import numpy as np\n'), ((6381, 6398), 'numpy.shape', 'np.shape', (['sets[0]'], {}), '(sets[0])\n', (6389, 6398), True, 'import numpy as np\n'), ((6475, 6492), 'numpy.shape', 'np.shape', (['sets[1]'], {}), '(sets[1])\n', (6483, 6492), True, 'import numpy as np\n')] |
from typing import Callable, Optional, List
from collections import namedtuple
import numpy as np
from easydict import EasyDict
from ding.utils import import_module, PLAYER_REGISTRY
from .algorithm import pfsp
class Player:
    """
    Overview:
        Base player class; a player is the basic member of a league.
    Interfaces:
        __init__
    Property:
        race, payoff, checkpoint_path, player_id, total_agent_step
    """
    _name = "BasePlayer"  # sub-class players override this marker

    def __init__(
            self,
            cfg: EasyDict,
            category: str,
            init_payoff: 'BattleSharedPayoff',  # noqa
            checkpoint_path: str,
            player_id: str,
            total_agent_step: int
    ) -> None:
        """
        Overview:
            Store the metadata shared by every player in the league.
        Arguments:
            - cfg (:obj:`EasyDict`): Player config dict.
            - category (:obj:`str`): Player category, depending on the game, \
                e.g. StarCraft has 3 races ['terran', 'protoss', 'zerg'].
            - init_payoff (:obj:`Union[BattleSharedPayoff, SoloSharedPayoff]`): Payoff shared by all players.
            - checkpoint_path (:obj:`str`): The path to load player checkpoint.
            - player_id (:obj:`str`): Player id in string format.
            - total_agent_step (:obj:`int`): 0 for a fresh active player; a historical player \
                inherits its parent's ``_total_agent_step`` at ``snapshot`` time.
        """
        # Validate the two caller-supplied identifiers before storing anything.
        assert isinstance(player_id, str)
        assert isinstance(total_agent_step, int), (total_agent_step, type(total_agent_step))
        self._cfg = cfg
        self._category = category
        self._payoff = init_payoff
        self._checkpoint_path = checkpoint_path
        self._player_id = player_id
        self._total_agent_step = total_agent_step

    @property
    def category(self) -> str:
        """Player category (e.g. race)."""
        return self._category

    @property
    def payoff(self) -> 'BattleSharedPayoff':  # noqa
        """Payoff table shared by all players in the league."""
        return self._payoff

    @property
    def checkpoint_path(self) -> str:
        """Path of this player's checkpoint file."""
        return self._checkpoint_path

    @property
    def player_id(self) -> str:
        """Unique player id."""
        return self._player_id

    @property
    def total_agent_step(self) -> int:
        """Number of agent steps this player has trained for."""
        return self._total_agent_step

    @total_agent_step.setter
    def total_agent_step(self, step: int) -> None:
        self._total_agent_step = step
@PLAYER_REGISTRY.register('historical_player')
class HistoricalPlayer(Player):
    """
    Overview:
        A frozen snapshot taken from an active player: its checkpoint never changes again.
        Adds the unique attribute ``parent_id`` on top of the base player.
    Property:
        race, payoff, checkpoint_path, player_id, total_agent_step, parent_id
    """
    _name = "HistoricalPlayer"

    def __init__(self, *args, parent_id: str) -> None:
        """
        Overview:
            Build the base player, then record which active player this snapshot came from.
        Arguments:
            - parent_id (:obj:`str`): Id of this snapshot's parent, which should be an active player.
        """
        super().__init__(*args)
        self._parent_id = parent_id

    @property
    def parent_id(self) -> str:
        """Id of the active player this snapshot was taken from."""
        return self._parent_id
class ActivePlayer(Player):
    """
    Overview:
        Active player can be updated, or snapshotted to a historical player in the league training.
    Interface:
        __init__, is_trained_enough, snapshot, mutate, get_job
    Property:
        race, payoff, checkpoint_path, player_id, total_agent_step
    """
    _name = "ActivePlayer"
    # (branch name, selection probability) pair used by ``_get_collect_opponent``.
    BRANCH = namedtuple("BRANCH", ['name', 'prob'])

    def __init__(self, *args, **kwargs) -> None:
        """
        Overview:
            Initialize player metadata, depending on the game.
        Note:
            - one_phase_step (:obj:`int`): An active player will be considered trained enough for snapshot \
                after two phase steps.
            - last_enough_step (:obj:`int`): Player's last step number that satisfied ``is_trained_enough``.
            - strong_win_rate (:obj:`float`): If win rates between this player and all the opponents are \
                greater than this value, this player can be regarded as strong enough to these opponents. \
                If also already trained for one phase step, this player can be regarded as trained enough \
                for snapshot.
            - branch_probs (:obj:`namedtuple`): Probabilities of selecting different opponent branches.
        """
        # NOTE(review): ``**kwargs`` is accepted but silently dropped (only *args is forwarded) —
        # confirm no caller relies on keyword arguments reaching ``Player.__init__``.
        super().__init__(*args)
        self._one_phase_step = int(float(self._cfg.one_phase_step))  # ``one_phase_step`` is like 1e9
        self._last_enough_step = 0
        self._strong_win_rate = self._cfg.strong_win_rate
        assert isinstance(self._cfg.branch_probs, dict)
        self._branch_probs = [self.BRANCH(k, v) for k, v in self._cfg.branch_probs.items()]
        # self._eval_opponent_difficulty = ["WEAK", "MEDIUM", "STRONG"]
        self._eval_opponent_difficulty = ["RULE_BASED"]
        # Index into ``_eval_opponent_difficulty``; advanced by ``increment_eval_difficulty``.
        self._eval_opponent_index = 0

    def is_trained_enough(self, select_fn: Optional[Callable] = None) -> bool:
        """
        Overview:
            Judge whether this player is trained enough for further operations (e.g. snapshot, mutate...)
            according to past step count and overall win rates against opponents.
            If yes, set ``self._last_enough_step`` to ``self._total_agent_step`` and return True;
            otherwise return False.
        Arguments:
            - select_fn (:obj:`function`): The function to select opponent players. \
                Defaults to selecting all historical players.
        Returns:
            - flag (:obj:`bool`): Whether this player is trained enough.
        """
        if select_fn is None:
            select_fn = lambda x: isinstance(x, HistoricalPlayer)  # noqa
        step_passed = self._total_agent_step - self._last_enough_step
        if step_passed < self._one_phase_step:
            # Not even one phase step trained since the last snapshot point.
            return False
        elif step_passed >= 2 * self._one_phase_step:
            # ``step_passed`` is 2 times of ``self._one_phase_step``, regarded as trained enough
            self._last_enough_step = self._total_agent_step
            return True
        else:
            # Get payoff against specific opponents (Different players have different type of opponent players)
            # If min win rate is larger than ``self._strong_win_rate``, then is judged trained enough
            selected_players = self._get_players(select_fn)
            if len(selected_players) == 0:  # No such player, therefore no past game
                return False
            win_rates = self._payoff[self, selected_players]
            if win_rates.min() > self._strong_win_rate:
                self._last_enough_step = self._total_agent_step
                return True
            else:
                return False

    def snapshot(self) -> HistoricalPlayer:
        """
        Overview:
            Generate a snapshot historical player from the current player, called in league's ``_snapshot``.
        Returns:
            - snapshot_player (:obj:`HistoricalPlayer`): New instantiated historical player, whose \
                checkpoint path and player id are derived from this player's plus the current step count.
        .. note::
            This method only generates a historical player object, but without saving the checkpoint, which should be
            done by league.
        """
        path = self.checkpoint_path.split('.pth')[0] + '_{}'.format(self._total_agent_step) + '.pth'
        return HistoricalPlayer(
            self._cfg,
            self.category,
            self.payoff,
            path,
            self.player_id + '_{}_historical'.format(int(self._total_agent_step)),
            self._total_agent_step,
            parent_id=self.player_id
        )

    def mutate(self, info: dict) -> Optional[str]:
        """
        Overview:
            Mutate the current player, called in league's ``_mutate_player``.
            Base implementation is a no-op (returns None); subclasses may override.
        Arguments:
            - info (:obj:`dict`): Related information for the mutation.
        Returns:
            - mutation_result (:obj:`str`): If the player does the mutation operation then returns the
                corresponding model path, otherwise returns None.
        """
        pass

    def get_job(self, eval_flag: bool = False) -> dict:
        """
        Overview:
            Get a dict containing some info about the job to be launched, e.g. the selected opponent.
        Arguments:
            - eval_flag (:obj:`bool`): Whether to select an opponent for evaluator task.
        Returns:
            - ret (:obj:`dict`): The returned dict. Should contain key ['opponent'].
        """
        if eval_flag:
            # eval opponent is a str (a builtin-bot difficulty name).
            opponent = self._eval_opponent_difficulty[self._eval_opponent_index]
        else:
            # collect opponent is a Player.
            opponent = self._get_collect_opponent()
        return {
            'opponent': opponent,
        }

    def _get_collect_opponent(self) -> Player:
        """
        Overview:
            Select an opponent according to the player's ``branch_probs``:
            sample p ~ U[0, 1), locate it in the cumulative-probability bins, and call
            the corresponding ``_<name>_branch`` method.
        Returns:
            - opponent (:obj:`Player`): Selected opponent.
        """
        # NOTE(review): assumes branch probabilities sum to (at least) 1.0; if float rounding
        # leaves the last cumulative bin below p, ``.index(True)`` raises ValueError — confirm
        # configs always sum to 1.
        p = np.random.uniform()
        L = len(self._branch_probs)
        cum_p = [0.] + [sum([j.prob for j in self._branch_probs[:i + 1]]) for i in range(L)]
        idx = [cum_p[i] <= p < cum_p[i + 1] for i in range(L)].index(True)
        branch_name = '_{}_branch'.format(self._branch_probs[idx].name)
        opponent = getattr(self, branch_name)()
        return opponent

    def _get_players(self, select_fn: Callable) -> List[Player]:
        """
        Overview:
            Get a list of players in the league (shared_payoff), selected by ``select_fn``.
        Arguments:
            - select_fn (:obj:`function`): Players in the returned list must satisfy this function.
        Returns:
            - players (:obj:`list`): A list of players that satisfies ``select_fn``.
        """
        return [player for player in self._payoff.players if select_fn(player)]

    def _get_opponent(self, players: list, p: Optional[np.ndarray] = None) -> Player:
        """
        Overview:
            Get one opponent player from list ``players`` according to probability ``p``.
        Arguments:
            - players (:obj:`list`): A list of players that can select opponent from.
            - p (:obj:`np.ndarray`): The selection probability of each player, should have the same size as \
                ``players``. If you don't need it and set None, it would select uniformly by default.
        Returns:
            - opponent_player (:obj:`Player`): A random chosen opponent player according to probability.
        """
        idx = np.random.choice(len(players), p=p)
        return players[idx]

    def increment_eval_difficulty(self) -> bool:
        """
        Overview:
            When evaluating, active player will choose a specific builtin opponent difficulty.
            This method is used to increment the difficulty.
            It is usually called after the easier builtin bot is already been beaten by this player.
        Returns:
            - increment_or_not (:obj:`bool`): True means difficulty is incremented; \
                False means difficulty is already the hardest.
        """
        if self._eval_opponent_index < len(self._eval_opponent_difficulty) - 1:
            self._eval_opponent_index += 1
            return True
        else:
            return False

    @property
    def checkpoint_path(self) -> str:
        return self._checkpoint_path

    @checkpoint_path.setter
    def checkpoint_path(self, path: str) -> None:
        # Active players may be re-pointed to a new checkpoint (unlike the read-only base property).
        self._checkpoint_path = path
@PLAYER_REGISTRY.register('naive_sp_player')
class NaiveSpPlayer(ActivePlayer):
    """
    Overview:
        Active player whose collect opponents come either from plain self-play
        or from prioritized fictitious self-play (PFSP) over historical snapshots.
    """

    def _pfsp_branch(self) -> HistoricalPlayer:
        """
        Overview:
            Select a prioritized fictitious self-play opponent among historical players,
            weighted by the payoff win rates.
        Returns:
            - player (:obj:`HistoricalPlayer`): The selected historical player, or ``self`` \
                (normal self-play) when no historical snapshot exists yet.
        """
        candidates = self._get_players(lambda player: isinstance(player, HistoricalPlayer))
        rates = self._payoff[self, candidates]
        if rates.shape == (0, ):
            # No historical snapshot yet: fall back to normal self-play.
            return self
        probs = pfsp(rates, weighting='squared')
        return self._get_opponent(candidates, probs)

    def _sp_branch(self) -> ActivePlayer:
        """
        Overview:
            Normal self-play: the opponent is this player itself.
        """
        return self
def create_player(cfg: EasyDict, player_type: str, *args, **kwargs) -> Player:
    """
    Overview:
        Factory for registered players. Given the key ``player_type``, create a new player
        instance if it exists in the registry, or raise a KeyError. In other words, a derived
        player must first register itself, then callers obtain instances through this function.
    Arguments:
        - cfg (:obj:`EasyDict`): Player config; the optional key ``import_names`` lists modules \
            to import so that their registrations are executed.
        - player_type (:obj:`str`): Registry key of the player class to create.
    Returns:
        - player (:obj:`Player`): The created new player, an instance of one of the registered \
            player classes.
    """
    # Import the modules that register player classes (no-op for an empty list).
    module_names = cfg.get('import_names', [])
    import_module(module_names)
    return PLAYER_REGISTRY.build(player_type, *args, **kwargs)
| [
"numpy.random.uniform",
"ding.utils.PLAYER_REGISTRY.build",
"ding.utils.PLAYER_REGISTRY.register",
"collections.namedtuple"
] | [((2445, 2490), 'ding.utils.PLAYER_REGISTRY.register', 'PLAYER_REGISTRY.register', (['"""historical_player"""'], {}), "('historical_player')\n", (2469, 2490), False, 'from ding.utils import import_module, PLAYER_REGISTRY\n'), ((11628, 11671), 'ding.utils.PLAYER_REGISTRY.register', 'PLAYER_REGISTRY.register', (['"""naive_sp_player"""'], {}), "('naive_sp_player')\n", (11652, 11671), False, 'from ding.utils import import_module, PLAYER_REGISTRY\n'), ((3599, 3637), 'collections.namedtuple', 'namedtuple', (['"""BRANCH"""', "['name', 'prob']"], {}), "('BRANCH', ['name', 'prob'])\n", (3609, 3637), False, 'from collections import namedtuple\n'), ((13199, 13250), 'ding.utils.PLAYER_REGISTRY.build', 'PLAYER_REGISTRY.build', (['player_type', '*args'], {}), '(player_type, *args, **kwargs)\n', (13220, 13250), False, 'from ding.utils import import_module, PLAYER_REGISTRY\n'), ((9116, 9135), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9133, 9135), True, 'import numpy as np\n')] |
#-*-coding:utf-8-*-
# date:2021-06-15
# Author: Eric.Lee
# function: handpose 3D Yolo_v3 Detect Inference
import os
import argparse
import torch
import torch.nn as nn
import numpy as np
import time
import datetime
import os
import math
from datetime import datetime
import cv2
import torch.nn.functional as F
from models.resnet import resnet18,resnet34,resnet50,resnet101
from e3d_data_iter.datasets import letterbox,get_heatmap
import sys
sys.path.append("./components/") # 添加模型组件路径
from hand_keypoints.handpose_x import handpose_x_model,draw_bd_handpose_c
from hand_detect.yolo_v3_hand import yolo_v3_hand_model
from utils.common_utils import *
import copy
from utils import func, bone, AIK, smoother
from utils.LM_new import LM_Solver
from op_pso import PSO
import open3d
from mpl_toolkits.mplot3d import Axes3D
from manopth import manolayer
if __name__ == "__main__":
    # Pipeline: webcam frame -> yolo_v3 hand detection -> 2D keypoints -> heatmap-fused
    # 3D keypoint regression -> PSO shape fitting + analytic IK -> MANO mesh rendered in
    # open3d, composited back onto the camera frame with a relative depth estimate.
    parser = argparse.ArgumentParser(description=' Project Hand Pose 3D Inference')
    parser.add_argument('--model_path', type=str, default = './if_package/e3d_handposex-resnet_50-size-128-loss-wing_loss-20210620.pth',
        help = 'model_path') # path to the e3d handpose (3D keypoint) model weights
    parser.add_argument('--detect_model_path', type=str, default = './if_package/hand_detect_416-20210606.pt',
        help = 'model_path') # path to the hand detector weights
    parser.add_argument('--handpose_x2d_model_path', type=str, default = './if_package/handposex_2d_resnet_50-size-256-wingloss102-0.119.pth',
        help = 'model_path') # path to the 2D hand-keypoint model weights
    parser.add_argument('--model', type=str, default = 'resnet_50',
        help = '''model : resnet_18,resnet_34,resnet_50,resnet_101''') # backbone type
    parser.add_argument('--num_classes', type=int , default = 63,
        help = 'num_classes') # 21 hand keypoints * 3 coords (x, y, z) = 63 (output is reshaped to (21, 3) below)
    parser.add_argument('--GPUS', type=str, default = '0',
        help = 'GPUS') # GPU selection
    parser.add_argument('--test_path', type=str, default = './image/',
        help = 'test_path') # test image folder path
    parser.add_argument('--img_size', type=tuple , default = (128,128),
        help = 'img_size') # model input image size
    parser.add_argument('--vis', type=bool , default = True,
        help = 'vis') # whether to visualize intermediate results
    print('\n/******************* {} ******************/\n'.format(parser.description))
    #--------------------------------------------------------------------------
    ops = parser.parse_args() # parse command-line arguments
    #--------------------------------------------------------------------------
    print('----------------------------------')
    unparsed = vars(ops) # vars() turns the argparse Namespace into a dict for printing
    for key in unparsed.keys():
        print('{} : {}'.format(key,unparsed[key]))
    #---------------------------------------------------------------------------
    os.environ['CUDA_VISIBLE_DEVICES'] = ops.GPUS
    test_path = ops.test_path # test image folder path (unused in the webcam loop below)
    #---------------------------------------------------------------- build the 3D keypoint model
    print('use model : %s'%(ops.model))
    if ops.model == 'resnet_50':
        model_ = resnet50(num_classes = ops.num_classes,img_size=ops.img_size[0])
    elif ops.model == 'resnet_18':
        model_ = resnet18(num_classes = ops.num_classes,img_size=ops.img_size[0])
    elif ops.model == 'resnet_34':
        model_ = resnet34(num_classes = ops.num_classes,img_size=ops.img_size[0])
    elif ops.model == 'resnet_101':
        model_ = resnet101(num_classes = ops.num_classes,img_size=ops.img_size[0])
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    model_ = model_.to(device)
    model_.eval() # set forward-inference (eval) mode
    # print(model_) # print the model structure
    # load the checkpoint if the file exists
    if os.access(ops.model_path,os.F_OK):# checkpoint
        chkpt = torch.load(ops.model_path, map_location=device)
        model_.load_state_dict(chkpt)
        print('load test model : {}'.format(ops.model_path))
    #----------------- build the handpose_x 2D keypoint model
    handpose_2d_model = handpose_x_model(model_path = ops.handpose_x2d_model_path)
    #----------------- build the yolo hand detection model
    hand_detect_model = yolo_v3_hand_model(model_path = ops.detect_model_path,model_arch = "yolo",conf_thres = 0.3)
    # hand_detect_model = yolo_v3_hand_model()
    #----------------- build manopth (MANO hand model layer)
    g_side = "right"
    print('load model finished')
    pose, shape = func.initiate("zero")
    pre_useful_bone_len = np.zeros((1, 15)) # bone-length placeholder fed to the LM solver
    solver = LM_Solver(num_Iter=99, th_beta=shape.cpu(), th_pose=pose.cpu(), lb_target=pre_useful_bone_len,
        weight=1e-5)
    pose0 = torch.eye(3).repeat(1, 16, 1, 1) # identity rotation for all 16 MANO joints
    mano = manolayer.ManoLayer(flat_hand_mean=True,
        side=g_side,
        mano_root='./mano/models',
        use_pca=False,
        root_rot_mode='rotmat',
        joint_rot_mode='rotmat')
    print('start ~')
    # One-Euro filters smooth per-frame jitter of joints / mesh / shape params.
    point_fliter = smoother.OneEuroFilter(23.0, 0.0)
    mesh_fliter = smoother.OneEuroFilter(23.0, 0.0)
    shape_fliter = smoother.OneEuroFilter(1.5, 0.0)
    #--------------------------- configure the point cloud / mesh viewer
    view_mat = np.array([[1.0, 0.0, 0.0],
        [0.0, -1.0, 0],
        [0.0, 0, -1.0]]) # flip y and z to match the render view
    mesh = open3d.geometry.TriangleMesh()
    hand_verts, j3d_recon = mano(pose0, shape.float())
    mesh.triangles = open3d.utility.Vector3iVector(mano.th_faces)
    hand_verts = hand_verts.clone().detach().cpu().numpy()[0]
    mesh.vertices = open3d.utility.Vector3dVector(hand_verts)
    viewer = open3d.visualization.Visualizer()
    viewer.create_window(width=800, height=800, window_name='HandPose3d_Mesh')
    viewer.add_geometry(mesh)
    viewer.update_renderer()
    renderOptions = viewer.get_render_option ()
    renderOptions.background_color = np.asarray([120/255,120/255,120/255]) # background color (value 120 is also used as the mask key below)
    # axis_pcd = open3d.create_mesh_coordinate_frame(size=0.5, origin=[0, 0, 0])
    # vis.add_geometry(axis_pcd)
    pts_flag = False # set True to also display a debug point cloud
    if pts_flag:
        test_pcd = open3d.geometry.PointCloud() # debug point cloud
        viewer.add_geometry(test_pcd)
    print('start pose estimate')
    pre_uv = None
    shape_time = 0
    opt_shape = None
    shape_flag = True
    #---------------------------------------------------------------- inference loop
    with torch.no_grad():
        idx = 0
        cap = cv2.VideoCapture(0) # USB cameras usually enumerate as 0; adjust the index if yours differs
        while True:
            ret, img_o = cap.read() # grab a camera frame
            if ret == True: # frame captured successfully
                img_yolo_x = img_o.copy()
                hand_bbox = hand_detect_model.predict(img_yolo_x,vis = False) # detect hands, get hand bounding boxes
                if len(hand_bbox) == 1:
                    #----------------------------------
                    finger_index = None # 2D coordinate of the index fingertip (for UI use)
                    finger_thumb = None # 2D coordinate of the thumb tip (for UI use)
                    #----------------------------------
                    x_min,y_min,x_max,y_max,_ = hand_bbox[0]
                    # Expand the detection to a square crop 1.6x the larger side.
                    w_ = max(abs(x_max-x_min),abs(y_max-y_min))
                    w_ = w_*1.6
                    x_mid = (x_max+x_min)/2
                    y_mid = (y_max+y_min)/2
                    #
                    x1,y1,x2,y2 = int(x_mid-w_/2),int(y_mid-w_/2),int(x_mid+w_/2),int(y_mid+w_/2)
                    # clamp the crop to the image bounds
                    x1 = int(np.clip(x1,0,img_o.shape[1]-1))
                    y1 = int(np.clip(y1,0,img_o.shape[0]-1))
                    x2 = int(np.clip(x2,0,img_o.shape[1]-1))
                    y2 = int(np.clip(y2,0,img_o.shape[0]-1))
                    img = img_o[y1:y2,x1:x2]
                else:
                    # exactly one hand is required; skip frames with 0 or >1 detections
                    continue
                #--------------------------------
                img_show = img.copy() # copy used only for visualization
                pts_2d_ = handpose_2d_model.predict(img.copy()) # handpose_2d predict (normalized (x, y) pairs)
                pts_2d_hand = {}
                kps_min_x,kps_min_y,kps_max_x,kps_max_y = 65535.,65535.,0.,0.
                for ptk in range(int(pts_2d_.shape[0]/2)):
                    # de-normalize to crop pixel coordinates
                    xh = pts_2d_[ptk*2+0]*float(img.shape[1])
                    yh = pts_2d_[ptk*2+1]*float(img.shape[0])
                    pts_2d_hand[str(ptk)] = {
                        "x":xh,
                        "y":yh,
                        }
                    # track the keypoint bounding box in full-frame coordinates (+x1/+y1 offset)
                    kps_min_x = (xh+x1) if (xh+x1)<kps_min_x else kps_min_x
                    kps_min_y = (yh+y1) if (yh+y1)<kps_min_y else kps_min_y
                    kps_max_x = (xh+x1) if (xh+x1)>kps_max_x else kps_max_x
                    kps_max_y = (yh+y1) if (yh+y1)>kps_max_y else kps_max_y
                    # thumb tip = mean of keypoints 3 and 4
                    if ptk == 3 or ptk == 4:
                        if finger_thumb is None:
                            finger_thumb = (int(xh+x1),int(yh+y1))
                        else:
                            finger_thumb = (int((xh+x1+finger_thumb[0])/2),int((yh+y1+finger_thumb[1])/2))
                    # index fingertip = mean of keypoints 7 and 8
                    if ptk == 7 or ptk == 8:
                        if finger_index is None:
                            finger_index = (int(xh+x1),int(yh+y1))
                        else:
                            finger_index = (int((xh+x1+finger_index[0])/2),int((yh+y1+finger_index[1])/2))
                    # cv2.circle(img_show, finger_index, 9, (25,160,255),-1)
                    if ops.vis:
                        cv2.circle(img_show, (int(xh),int(yh)), 4, (255,50,60),-1)
                        cv2.circle(img_show, (int(xh),int(yh)), 3, (25,160,255),-1)
                hand2d_kps_bbox = (int(kps_min_x),int(kps_min_y),int(kps_max_x),int(kps_max_y)) # bounding box of the 2D hand keypoints
                if ops.vis:
                    draw_bd_handpose_c(img_show,pts_2d_hand,0,0,2)
                    cv2.namedWindow("handpose_2d",0)
                    cv2.imshow("handpose_2d",img_show)
                #--------------------------------
                img_lbox,ratio, dw, dh = letterbox(img.copy(), height=ops.img_size[0], color=(0,0,0))
                # if ops.vis:
                #     cv2.namedWindow("letterbox",0)
                #     cv2.imshow("letterbox",img_lbox)
                #-------------------------------- get heatmap
                x1y1x2y2 = 0,0,0,0
                offset_x1,offset_y1 = 0,0
                hm,hm_w = get_heatmap(img_lbox.copy(),x1y1x2y2,pts_2d_hand,ratio, dw, dh,offset_x1,offset_y1,vis=False)
                if ops.vis:
                    cv2.namedWindow("hm_w",0)
                    cv2.imshow("hm_w",hm_w)
                #--------------------------------
                img_fix_size = img_lbox.astype(np.float32)
                img_fix_size_r = img_fix_size.astype(np.float32)
                img_fix_size_r = (img_fix_size_r-128.)/256. # normalize pixels to roughly [-0.5, 0.5)
                #--------------------------------------------------
                image_fusion = np.concatenate((img_fix_size_r,hm),axis=2) # stack image channels with keypoint heatmaps
                image_fusion = image_fusion.transpose(2, 0, 1)
                image_fusion = torch.from_numpy(image_fusion)
                image_fusion = image_fusion.unsqueeze_(0)
                if use_cuda:
                    image_fusion = image_fusion.cuda() # (bs, channel, h, w)
                # print("image_fusion size : {}".format(image_fusion.size()))
                #-------------------------------- # handpose_3d predict
                pre_ = model_(image_fusion.float()) # model inference
                output = pre_.cpu().detach().numpy()
                output = np.squeeze(output)
                # print("handpose_3d output shape : {}".format(output.shape))
                pre_3d_joints = output.reshape((21,3))
                # print("pre_3d_joints shape : {}".format(pre_3d_joints.shape))
                if g_side == "left":
                    print("------------------->>. left")
                    pre_3d_joints[:,0] *=(-1.) # mirror x for a left hand
                pre_3d_joints = torch.tensor(pre_3d_joints).squeeze(0)
                pre_3d_joints= pre_3d_joints.cuda()
                # print(pre_3d_joints.size())
                #--------------------------------------------------------------------
                # now_uv = result['uv'].clone().detach().cpu().numpy()[0, 0]
                # now_uv = now_uv.astype(np.float)
                trans = np.zeros((1, 3))
                # trans[0, 0:2] = now_uv - 16.0
                trans = trans / 16.0
                new_tran = np.array([[trans[0, 1], trans[0, 0], trans[0, 2]]])
                pre_joints = pre_3d_joints.clone().detach().cpu().numpy()
                flited_joints = point_fliter.process(pre_joints)
                # fliter_ax.cla()
                #
                # filted_ax = vis.plot3d(flited_joints + new_tran, fliter_ax)
                # pre_useful_bone_len = bone.caculate_length(pre_joints, label="useful")
                pre_useful_bone_len = bone.caculate_length(pre_joints, label="useful")
                # Fit MANO shape params with PSO so bone lengths match the prediction.
                NGEN = 0 # number of PSO iterations
                popsize = 100
                low = np.zeros((1, 10)) - 3.0
                up = np.zeros((1, 10)) - 2.0
                parameters = [NGEN, popsize, low, up]
                pso = PSO(parameters, pre_useful_bone_len.reshape((1, 15)),g_side)
                pso.main(solver)
                if True:#opt_shape is None:
                    opt_shape = pso.ng_best
                    opt_shape = shape_fliter.process(opt_shape)
                    opt_tensor_shape = torch.tensor(opt_shape, dtype=torch.float)
                _, j3d_p0_ops = mano(pose0, opt_tensor_shape)
                template = j3d_p0_ops.cpu().numpy().squeeze(0) / 1000.0 # template, m 21*3
                # scale predicted joints to the template's metric size
                ratio = np.linalg.norm(template[9] - template[0]) / np.linalg.norm(pre_joints[9] - pre_joints[0])
                j3d_pre_process = pre_joints * ratio # template, m
                j3d_pre_process = j3d_pre_process - j3d_pre_process[0] + template[0]
                pose_R = AIK.adaptive_IK(template, j3d_pre_process) # analytic inverse kinematics -> joint rotations
                pose_R = torch.from_numpy(pose_R).float()
                # reconstruction
                hand_verts, j3d_recon = mano(pose_R, opt_tensor_shape.float())
                hand_verts[:,:,:] = hand_verts[:,:,:]*(0.503)
                # print(j3d_recon.size())
                mesh.triangles = open3d.utility.Vector3iVector(mano.th_faces)
                hand_verts = hand_verts.clone().detach().cpu().numpy()[0]
                hand_verts = mesh_fliter.process(hand_verts)
                hand_verts = np.matmul(view_mat, hand_verts.T).T
                # shift the mesh sideways depending on handedness
                if g_side == "right":
                    hand_verts[:, 0] = hand_verts[:, 0] - 40
                else:
                    hand_verts[:, 0] = hand_verts[:, 0] + 40
                hand_verts[:, 1] = hand_verts[:, 1] - 0
                mesh_tran = np.array([[-new_tran[0, 0], new_tran[0, 1], new_tran[0, 2]]])
                hand_verts = hand_verts - 100 * mesh_tran
                mesh.vertices = open3d.utility.Vector3dVector(hand_verts)
                # mesh.paint_uniform_color([252 / 255, 224 / 255, 203 / 255])
                # mesh.paint_uniform_color([238 / 255, 188 / 255, 158 / 255])
                mesh.paint_uniform_color([87 / 255, 131 / 255, 235 / 255])
                mesh.compute_triangle_normals()
                mesh.compute_vertex_normals()
                #-----------
                if pts_flag:
                    if False:
                        j3d_ = j3d_recon.detach().cpu().numpy()
                        j3d_[0][:,1] *=(-1.)
                        # j3d_[0][:,0] +=trans[0,0]
                        j3d_[0] = j3d_[0] - 100 * mesh_tran
                        j3d_[0][:,0] -=50
                        j3d_[0][:,1] -=30
                        # print(j3d_.shape,j3d_)
                        test_pcd.points = open3d.utility.Vector3dVector(j3d_[0]) # point cloud coordinates
                    else:
                        # 778*3
                        print("hand_verts shape : {}".format(hand_verts.shape))
                        # a = np.concatenate((
                        #     hand_verts[38:40,:],
                        #     hand_verts[40:100,:]
                        #     ),axis=0)
                        test_pcd.points = open3d.utility.Vector3dVector(hand_verts)
                        pre_joints[:,1] *=-1.
                        pre_joints = pre_joints*70
                        pre_joints[:,1] -= 40
                        pre_joints[:,0] -= 0
                        # print("pre_joints",pre_joints.shape)
                        # test_pcd.points = open3d.utility.Vector3dVector(pre_joints)
                        # test_pcd.points = open3d.utility.Vector3dVector(pre_joints[1,:].reshape(1,3))
                        # rgb = np.asarray([250,0,250])
                        # rgb_t = np.transpose(rgb)
                        # test_pcd.colors = open3d.utility.Vector3dVector(rgb_t.astype(np.float) / 255.0)
                        # print("hand_verts shape",hand_verts)
                #-----------
                viewer.update_geometry(mesh)
                if pts_flag:
                    viewer.update_geometry(test_pcd)
                viewer.poll_events()
                viewer.update_renderer()
                #---------------------------------------------------------------
                image_open3d = viewer.capture_screen_float_buffer(False)
                # viewer.capture_screen_image("open3d.jpg", False)
                # depth = vis.capture_depth_float_buffer(False)
                image_3d = viewer.capture_screen_float_buffer(False)
                image_3d = np.asarray(image_3d)
                image_3d = image_3d*255
                image_3d = np.clip(image_3d,0,255)
                image_3d = image_3d.astype(np.uint8)
                image_3d = cv2.cvtColor(image_3d, cv2.COLOR_RGB2BGR)
                # print(image_3d.shape)
                # Segment the rendered hand: any pixel differing from the grey (120) background.
                mask_0 = np.where(image_3d[:,:,0]!=120,1,0)
                mask_1 = np.where(image_3d[:,:,1]!=120,1,0)
                mask_2 = np.where(image_3d[:,:,2]!=120,1,0)
                img_mask = np.logical_or(mask_0,mask_1)
                img_mask = np.logical_or(img_mask,mask_2)
                # cv2.namedWindow("img_mask",0)
                # cv2.imshow("img_mask",img_mask.astype(np.float))
                locs = np.where(img_mask != 0)
                xx1 = np.min(locs[1])
                xx2 = np.max(locs[1])
                yy1 = np.min(locs[0])
                yy2 = np.max(locs[0])
                # cv2.rectangle(image_3d, (xx1,yy1), (xx2,yy2), (255,0,255), 5) # draw image_3d bbox
                model_hand_w = (xx2-xx1)
                model_hand_h = (yy2-yy1)
                #----------
                cv2.namedWindow("image_3d",0)
                cv2.imshow("image_3d",image_3d)
                # cv2.rectangle(img_yolo_x, (hand2d_kps_bbox[0],hand2d_kps_bbox[1]),
                #     (hand2d_kps_bbox[2],hand2d_kps_bbox[3]), (0,165,255), 3) # draw the 2D hand-keypoint bbox
                # scale_ = ((x_max-x_min)/(xx2-xx1) + (y_max-y_min)/(yy2-yy1))/2.*1.01
                # scale factor mapping the rendered hand size to the detected hand size
                scale_ = ((hand2d_kps_bbox[2]-hand2d_kps_bbox[0])/(xx2-xx1)
                    + (hand2d_kps_bbox[3]-hand2d_kps_bbox[1])/(yy2-yy1))/2.*1.2
                w_3d_ = (xx2-xx1)*scale_
                h_3d_ = (yy2-yy1)*scale_
                x_mid_3d = (xx1+xx2)/2.
                y_mid_3d = (yy1+yy2)/2.
                x_mid,y_mid = int(x_mid),int(y_mid)
                x1,y1,x2,y2 = int(x_mid-w_3d_/2.),int(y_mid-h_3d_/2.),int(x_mid+w_3d_/2.),int(y_mid+h_3d_/2.)
                crop_ = image_3d[yy1:yy2,xx1:xx2]
                crop_mask = (img_mask[yy1:yy2,xx1:xx2].astype(np.float)*255).astype(np.uint8)
                w_r,h_r = int(crop_.shape[1]*scale_/2),int(crop_.shape[0]*scale_/2)
                crop_ = cv2.resize(crop_, (w_r*2, h_r*2))
                crop_mask = cv2.resize(crop_mask, (w_r*2, h_r*2))
                crop_mask = np.where(crop_mask[:,:]>0.,1.,0.)
                crop_mask = np.expand_dims(crop_mask, 2)
                try:
                    # alpha-composite the rendered hand onto the camera frame at the hand position;
                    # skip the frame if the paste region falls outside the image
                    img_ff = img_yolo_x[int(y_mid - h_r ):int(y_mid + h_r ),int(x_mid - w_r ):int(x_mid + w_r ),:]*(1.-crop_mask) + crop_*crop_mask
                    img_yolo_x[int(y_mid - h_r ):int(y_mid + h_r ),int(x_mid - w_r ):int(x_mid + w_r ),:] = img_ff.astype(np.uint8)
                except:
                    continue
                real_hand_w = w_r*2
                real_hand_h = h_r*2
                depth_z = (model_hand_h/real_hand_h + model_hand_w/real_hand_w)/2. # relative depth z (render size vs on-screen size)
                #
                cv2.putText(img_yolo_x, " Relative Depth_Z :{:.3f} ".format(depth_z), (4,42),cv2.FONT_HERSHEY_DUPLEX, 1.1, (55, 0, 220),7)
                cv2.putText(img_yolo_x, " Relative Depth_Z :{:.3f} ".format(depth_z), (4,42),cv2.FONT_HERSHEY_DUPLEX, 1.1, (25, 180, 250),1)
                cv2.namedWindow("img_yolo_x",0)
                cv2.imshow("img_yolo_x",img_yolo_x)
                # x_mid = (x_max+x_min)/2
                # y_mid = (y_max+y_min)/2
                if cv2.waitKey(1) == 27: # ESC quits
                    break
            else:
                break
    cv2.destroyAllWindows()
    print('well done ')
| [
"utils.bone.caculate_length",
"utils.AIK.adaptive_IK",
"torch.eye",
"argparse.ArgumentParser",
"open3d.geometry.PointCloud",
"numpy.clip",
"utils.func.initiate",
"hand_detect.yolo_v3_hand.yolo_v3_hand_model",
"numpy.linalg.norm",
"torch.device",
"torch.no_grad",
"cv2.imshow",
"models.resnet.... | [((443, 475), 'sys.path.append', 'sys.path.append', (['"""./components/"""'], {}), "('./components/')\n", (458, 475), False, 'import sys\n'), ((891, 961), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""" Project Hand Pose 3D Inference"""'}), "(description=' Project Hand Pose 3D Inference')\n", (914, 961), False, 'import argparse\n'), ((3423, 3448), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3446, 3448), False, 'import torch\n'), ((3463, 3508), 'torch.device', 'torch.device', (["('cuda:0' if use_cuda else 'cpu')"], {}), "('cuda:0' if use_cuda else 'cpu')\n", (3475, 3508), False, 'import torch\n'), ((3618, 3652), 'os.access', 'os.access', (['ops.model_path', 'os.F_OK'], {}), '(ops.model_path, os.F_OK)\n', (3627, 3652), False, 'import os\n'), ((3900, 3956), 'hand_keypoints.handpose_x.handpose_x_model', 'handpose_x_model', ([], {'model_path': 'ops.handpose_x2d_model_path'}), '(model_path=ops.handpose_x2d_model_path)\n', (3916, 3956), False, 'from hand_keypoints.handpose_x import handpose_x_model, draw_bd_handpose_c\n'), ((4019, 4110), 'hand_detect.yolo_v3_hand.yolo_v3_hand_model', 'yolo_v3_hand_model', ([], {'model_path': 'ops.detect_model_path', 'model_arch': '"""yolo"""', 'conf_thres': '(0.3)'}), "(model_path=ops.detect_model_path, model_arch='yolo',\n conf_thres=0.3)\n", (4037, 4110), False, 'from hand_detect.yolo_v3_hand import yolo_v3_hand_model\n'), ((4265, 4286), 'utils.func.initiate', 'func.initiate', (['"""zero"""'], {}), "('zero')\n", (4278, 4286), False, 'from utils import func, bone, AIK, smoother\n'), ((4313, 4330), 'numpy.zeros', 'np.zeros', (['(1, 15)'], {}), '((1, 15))\n', (4321, 4330), True, 'import numpy as np\n'), ((4540, 4694), 'manopth.manolayer.ManoLayer', 'manolayer.ManoLayer', ([], {'flat_hand_mean': '(True)', 'side': 'g_side', 'mano_root': '"""./mano/models"""', 'use_pca': '(False)', 'root_rot_mode': '"""rotmat"""', 'joint_rot_mode': '"""rotmat"""'}), 
"(flat_hand_mean=True, side=g_side, mano_root=\n './mano/models', use_pca=False, root_rot_mode='rotmat', joint_rot_mode=\n 'rotmat')\n", (4559, 4694), False, 'from manopth import manolayer\n'), ((4880, 4913), 'utils.smoother.OneEuroFilter', 'smoother.OneEuroFilter', (['(23.0)', '(0.0)'], {}), '(23.0, 0.0)\n', (4902, 4913), False, 'from utils import func, bone, AIK, smoother\n'), ((4932, 4965), 'utils.smoother.OneEuroFilter', 'smoother.OneEuroFilter', (['(23.0)', '(0.0)'], {}), '(23.0, 0.0)\n', (4954, 4965), False, 'from utils import func, bone, AIK, smoother\n'), ((4985, 5017), 'utils.smoother.OneEuroFilter', 'smoother.OneEuroFilter', (['(1.5)', '(0.0)'], {}), '(1.5, 0.0)\n', (5007, 5017), False, 'from utils import func, bone, AIK, smoother\n'), ((5071, 5130), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, -1.0, 0], [0.0, 0, -1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, -1.0, 0], [0.0, 0, -1.0]])\n', (5079, 5130), True, 'import numpy as np\n'), ((5192, 5222), 'open3d.geometry.TriangleMesh', 'open3d.geometry.TriangleMesh', ([], {}), '()\n', (5220, 5222), False, 'import open3d\n'), ((5299, 5343), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['mano.th_faces'], {}), '(mano.th_faces)\n', (5328, 5343), False, 'import open3d\n'), ((5426, 5467), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['hand_verts'], {}), '(hand_verts)\n', (5455, 5467), False, 'import open3d\n'), ((5481, 5514), 'open3d.visualization.Visualizer', 'open3d.visualization.Visualizer', ([], {}), '()\n', (5512, 5514), False, 'import open3d\n'), ((5738, 5783), 'numpy.asarray', 'np.asarray', (['[120 / 255, 120 / 255, 120 / 255]'], {}), '([120 / 255, 120 / 255, 120 / 255])\n', (5748, 5783), True, 'import numpy as np\n'), ((20928, 20951), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20949, 20951), False, 'import cv2\n'), ((2989, 3052), 'models.resnet.resnet50', 'resnet50', ([], {'num_classes': 'ops.num_classes', 'img_size': 
'ops.img_size[0]'}), '(num_classes=ops.num_classes, img_size=ops.img_size[0])\n', (2997, 3052), False, 'from models.resnet import resnet18, resnet34, resnet50, resnet101\n'), ((3681, 3728), 'torch.load', 'torch.load', (['ops.model_path'], {'map_location': 'device'}), '(ops.model_path, map_location=device)\n', (3691, 3728), False, 'import torch\n'), ((5957, 5985), 'open3d.geometry.PointCloud', 'open3d.geometry.PointCloud', ([], {}), '()\n', (5983, 5985), False, 'import open3d\n'), ((6232, 6247), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6245, 6247), False, 'import torch\n'), ((6279, 6298), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (6295, 6298), False, 'import cv2\n'), ((3106, 3169), 'models.resnet.resnet18', 'resnet18', ([], {'num_classes': 'ops.num_classes', 'img_size': 'ops.img_size[0]'}), '(num_classes=ops.num_classes, img_size=ops.img_size[0])\n', (3114, 3169), False, 'from models.resnet import resnet18, resnet34, resnet50, resnet101\n'), ((4495, 4507), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (4504, 4507), False, 'import torch\n'), ((3223, 3286), 'models.resnet.resnet34', 'resnet34', ([], {'num_classes': 'ops.num_classes', 'img_size': 'ops.img_size[0]'}), '(num_classes=ops.num_classes, img_size=ops.img_size[0])\n', (3231, 3286), False, 'from models.resnet import resnet18, resnet34, resnet50, resnet101\n'), ((10686, 10730), 'numpy.concatenate', 'np.concatenate', (['(img_fix_size_r, hm)'], {'axis': '(2)'}), '((img_fix_size_r, hm), axis=2)\n', (10700, 10730), True, 'import numpy as np\n'), ((10823, 10853), 'torch.from_numpy', 'torch.from_numpy', (['image_fusion'], {}), '(image_fusion)\n', (10839, 10853), False, 'import torch\n'), ((11308, 11326), 'numpy.squeeze', 'np.squeeze', (['output'], {}), '(output)\n', (11318, 11326), True, 'import numpy as np\n'), ((12090, 12106), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (12098, 12106), True, 'import numpy as np\n'), ((12219, 12270), 'numpy.array', 'np.array', 
(['[[trans[0, 1], trans[0, 0], trans[0, 2]]]'], {}), '([[trans[0, 1], trans[0, 0], trans[0, 2]]])\n', (12227, 12270), True, 'import numpy as np\n'), ((12670, 12718), 'utils.bone.caculate_length', 'bone.caculate_length', (['pre_joints'], {'label': '"""useful"""'}), "(pre_joints, label='useful')\n", (12690, 12718), False, 'from utils import func, bone, AIK, smoother\n'), ((13235, 13277), 'torch.tensor', 'torch.tensor', (['opt_shape'], {'dtype': 'torch.float'}), '(opt_shape, dtype=torch.float)\n', (13247, 13277), False, 'import torch\n'), ((13724, 13766), 'utils.AIK.adaptive_IK', 'AIK.adaptive_IK', (['template', 'j3d_pre_process'], {}), '(template, j3d_pre_process)\n', (13739, 13766), False, 'from utils import func, bone, AIK, smoother\n'), ((14076, 14120), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['mano.th_faces'], {}), '(mano.th_faces)\n', (14105, 14120), False, 'import open3d\n'), ((14587, 14648), 'numpy.array', 'np.array', (['[[-new_tran[0, 0], new_tran[0, 1], new_tran[0, 2]]]'], {}), '([[-new_tran[0, 0], new_tran[0, 1], new_tran[0, 2]]])\n', (14595, 14648), True, 'import numpy as np\n'), ((14740, 14781), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['hand_verts'], {}), '(hand_verts)\n', (14769, 14781), False, 'import open3d\n'), ((17378, 17398), 'numpy.asarray', 'np.asarray', (['image_3d'], {}), '(image_3d)\n', (17388, 17398), True, 'import numpy as np\n'), ((17466, 17491), 'numpy.clip', 'np.clip', (['image_3d', '(0)', '(255)'], {}), '(image_3d, 0, 255)\n', (17473, 17491), True, 'import numpy as np\n'), ((17570, 17611), 'cv2.cvtColor', 'cv2.cvtColor', (['image_3d', 'cv2.COLOR_RGB2BGR'], {}), '(image_3d, cv2.COLOR_RGB2BGR)\n', (17582, 17611), False, 'import cv2\n'), ((17678, 17718), 'numpy.where', 'np.where', (['(image_3d[:, :, 0] != 120)', '(1)', '(0)'], {}), '(image_3d[:, :, 0] != 120, 1, 0)\n', (17686, 17718), True, 'import numpy as np\n'), ((17738, 17778), 'numpy.where', 'np.where', (['(image_3d[:, :, 1] != 
120)', '(1)', '(0)'], {}), '(image_3d[:, :, 1] != 120, 1, 0)\n', (17746, 17778), True, 'import numpy as np\n'), ((17798, 17838), 'numpy.where', 'np.where', (['(image_3d[:, :, 2] != 120)', '(1)', '(0)'], {}), '(image_3d[:, :, 2] != 120, 1, 0)\n', (17806, 17838), True, 'import numpy as np\n'), ((17860, 17889), 'numpy.logical_or', 'np.logical_or', (['mask_0', 'mask_1'], {}), '(mask_0, mask_1)\n', (17873, 17889), True, 'import numpy as np\n'), ((17916, 17947), 'numpy.logical_or', 'np.logical_or', (['img_mask', 'mask_2'], {}), '(img_mask, mask_2)\n', (17929, 17947), True, 'import numpy as np\n'), ((18086, 18109), 'numpy.where', 'np.where', (['(img_mask != 0)'], {}), '(img_mask != 0)\n', (18094, 18109), True, 'import numpy as np\n'), ((18132, 18147), 'numpy.min', 'np.min', (['locs[1]'], {}), '(locs[1])\n', (18138, 18147), True, 'import numpy as np\n'), ((18170, 18185), 'numpy.max', 'np.max', (['locs[1]'], {}), '(locs[1])\n', (18176, 18185), True, 'import numpy as np\n'), ((18208, 18223), 'numpy.min', 'np.min', (['locs[0]'], {}), '(locs[0])\n', (18214, 18223), True, 'import numpy as np\n'), ((18246, 18261), 'numpy.max', 'np.max', (['locs[0]'], {}), '(locs[0])\n', (18252, 18261), True, 'import numpy as np\n'), ((18481, 18511), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image_3d"""', '(0)'], {}), "('image_3d', 0)\n", (18496, 18511), False, 'import cv2\n'), ((18527, 18559), 'cv2.imshow', 'cv2.imshow', (['"""image_3d"""', 'image_3d'], {}), "('image_3d', image_3d)\n", (18537, 18559), False, 'import cv2\n'), ((19557, 19594), 'cv2.resize', 'cv2.resize', (['crop_', '(w_r * 2, h_r * 2)'], {}), '(crop_, (w_r * 2, h_r * 2))\n', (19567, 19594), False, 'import cv2\n'), ((19619, 19660), 'cv2.resize', 'cv2.resize', (['crop_mask', '(w_r * 2, h_r * 2)'], {}), '(crop_mask, (w_r * 2, h_r * 2))\n', (19629, 19660), False, 'import cv2\n'), ((19685, 19726), 'numpy.where', 'np.where', (['(crop_mask[:, :] > 0.0)', '(1.0)', '(0.0)'], {}), '(crop_mask[:, :] > 0.0, 1.0, 0.0)\n', (19693, 19726), 
True, 'import numpy as np\n'), ((19747, 19775), 'numpy.expand_dims', 'np.expand_dims', (['crop_mask', '(2)'], {}), '(crop_mask, 2)\n', (19761, 19775), True, 'import numpy as np\n'), ((20646, 20678), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img_yolo_x"""', '(0)'], {}), "('img_yolo_x', 0)\n", (20661, 20678), False, 'import cv2\n'), ((20694, 20730), 'cv2.imshow', 'cv2.imshow', (['"""img_yolo_x"""', 'img_yolo_x'], {}), "('img_yolo_x', img_yolo_x)\n", (20704, 20730), False, 'import cv2\n'), ((3341, 3405), 'models.resnet.resnet101', 'resnet101', ([], {'num_classes': 'ops.num_classes', 'img_size': 'ops.img_size[0]'}), '(num_classes=ops.num_classes, img_size=ops.img_size[0])\n', (3350, 3405), False, 'from models.resnet import resnet18, resnet34, resnet50, resnet101\n'), ((9527, 9577), 'hand_keypoints.handpose_x.draw_bd_handpose_c', 'draw_bd_handpose_c', (['img_show', 'pts_2d_hand', '(0)', '(0)', '(2)'], {}), '(img_show, pts_2d_hand, 0, 0, 2)\n', (9545, 9577), False, 'from hand_keypoints.handpose_x import handpose_x_model, draw_bd_handpose_c\n'), ((9594, 9627), 'cv2.namedWindow', 'cv2.namedWindow', (['"""handpose_2d"""', '(0)'], {}), "('handpose_2d', 0)\n", (9609, 9627), False, 'import cv2\n'), ((9647, 9682), 'cv2.imshow', 'cv2.imshow', (['"""handpose_2d"""', 'img_show'], {}), "('handpose_2d', img_show)\n", (9657, 9682), False, 'import cv2\n'), ((10281, 10307), 'cv2.namedWindow', 'cv2.namedWindow', (['"""hm_w"""', '(0)'], {}), "('hm_w', 0)\n", (10296, 10307), False, 'import cv2\n'), ((10327, 10351), 'cv2.imshow', 'cv2.imshow', (['"""hm_w"""', 'hm_w'], {}), "('hm_w', hm_w)\n", (10337, 10351), False, 'import cv2\n'), ((12808, 12825), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (12816, 12825), True, 'import numpy as np\n'), ((12853, 12870), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (12861, 12870), True, 'import numpy as np\n'), ((13456, 13497), 'numpy.linalg.norm', 'np.linalg.norm', (['(template[9] - template[0])'], {}), '(template[9] - 
template[0])\n', (13470, 13497), True, 'import numpy as np\n'), ((13500, 13545), 'numpy.linalg.norm', 'np.linalg.norm', (['(pre_joints[9] - pre_joints[0])'], {}), '(pre_joints[9] - pre_joints[0])\n', (13514, 13545), True, 'import numpy as np\n'), ((14285, 14318), 'numpy.matmul', 'np.matmul', (['view_mat', 'hand_verts.T'], {}), '(view_mat, hand_verts.T)\n', (14294, 14318), True, 'import numpy as np\n'), ((20835, 20849), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (20846, 20849), False, 'import cv2\n'), ((7266, 7300), 'numpy.clip', 'np.clip', (['x1', '(0)', '(img_o.shape[1] - 1)'], {}), '(x1, 0, img_o.shape[1] - 1)\n', (7273, 7300), True, 'import numpy as np\n'), ((7327, 7361), 'numpy.clip', 'np.clip', (['y1', '(0)', '(img_o.shape[0] - 1)'], {}), '(y1, 0, img_o.shape[0] - 1)\n', (7334, 7361), True, 'import numpy as np\n'), ((7388, 7422), 'numpy.clip', 'np.clip', (['x2', '(0)', '(img_o.shape[1] - 1)'], {}), '(x2, 0, img_o.shape[1] - 1)\n', (7395, 7422), True, 'import numpy as np\n'), ((7449, 7483), 'numpy.clip', 'np.clip', (['y2', '(0)', '(img_o.shape[0] - 1)'], {}), '(y2, 0, img_o.shape[0] - 1)\n', (7456, 7483), True, 'import numpy as np\n'), ((11715, 11742), 'torch.tensor', 'torch.tensor', (['pre_3d_joints'], {}), '(pre_3d_joints)\n', (11727, 11742), False, 'import torch\n'), ((13792, 13816), 'torch.from_numpy', 'torch.from_numpy', (['pose_R'], {}), '(pose_R)\n', (13808, 13816), False, 'import torch\n'), ((15591, 15629), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['j3d_[0]'], {}), '(j3d_[0])\n', (15620, 15629), False, 'import open3d\n'), ((16011, 16052), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['hand_verts'], {}), '(hand_verts)\n', (16040, 16052), False, 'import open3d\n')] |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import math
import numpy as np
from scipy.spatial import KDTree
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish.
# Sparse waypoint offsets (relative to the closest waypoint) that are actually
# published: dense near the car, progressively sparser farther ahead.
LOOKAHEAD_FILTER = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 20, 28, 36, 52, 68, 100, 132, 196]
# Number of waypoints to stop short of the traffic light's stop line.
CENTER_TO_LANE_BUFFER = 4
MAX_DECEL = 0.5 # Max deceleration
FREQUENCY = 50 # 50Hz main-loop publishing rate
class WaypointUpdater(object):
    """Publishes the final waypoints ahead of the vehicle.

    Subscribes to the current pose, the static base waypoints and the
    traffic-light waypoint index, and periodically publishes a trimmed
    list of waypoints whose velocities are lowered when a red light is
    ahead.
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')

        # State filled in by the subscriber callbacks.
        self.pose = None
        self.base_waypoints = None
        self.waypoints_tree = None
        self.stop_line_wp_idx = -1

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        # TODO: Add a subscriber for /obstacle_waypoint below

        self.loop()

    def loop(self):
        """Main loop: publish final waypoints at FREQUENCY Hz."""
        rate = rospy.Rate(FREQUENCY)
        while not rospy.is_shutdown():
            ready = self.pose and self.base_waypoints and self.waypoints_tree
            if ready:
                self.publish_waypoints(self.get_final_waypoints())
            rate.sleep()

    def publish_waypoints(self, final_waypoints):
        """Wrap the waypoints in a Lane message and publish them."""
        msg = Lane()
        msg.header = self.base_waypoints.header
        msg.waypoints = final_waypoints
        self.final_waypoints_pub.publish(msg)

    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Store the base waypoints and index their 2D positions in a KD-tree."""
        self.base_waypoints = waypoints
        coords = [[wp.pose.pose.position.x, wp.pose.pose.position.y]
                  for wp in waypoints.waypoints]
        self.waypoints_tree = KDTree(coords)

    def traffic_cb(self, msg):
        """Record the waypoint index of the next red light's stop line (-1 if none)."""
        self.stop_line_wp_idx = msg.data

    def get_waypoint_velocity(self, waypoint):
        """Return the linear velocity stored on a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the linear velocity of waypoints[waypoint]."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def get_closest_waypoint_idx(self):
        """Return the index of the closest base waypoint that is ahead of the car."""
        px = self.pose.pose.position.x
        py = self.pose.pose.position.y
        idx = self.waypoints_tree.query([px, py], 1)[1]

        waypoints = self.base_waypoints.waypoints
        closest = waypoints[idx]
        previous = waypoints[idx - 1] if idx > 0 else waypoints[len(waypoints) - 1]

        closest_xy = np.array([closest.pose.pose.position.x, closest.pose.pose.position.y])
        previous_xy = np.array([previous.pose.pose.position.x, previous.pose.pose.position.y])
        car_xy = np.array([px, py])

        # Positive projection onto the track direction means the closest
        # waypoint is behind the car: advance one index (wrapping around).
        if np.dot(closest_xy - previous_xy, car_xy - closest_xy) > 0:
            idx = (idx + 1) % len(waypoints)
        return idx

    def get_final_waypoints(self):
        """Build the waypoint list to publish for the current cycle."""
        closest_idx = self.get_closest_waypoint_idx()
        # We want the car to stop at the end of the track, so no modulo here.
        farthest_idx = min(closest_idx + LOOKAHEAD_WPS, len(self.base_waypoints.waypoints))

        red_light_ahead = (self.stop_line_wp_idx != -1
                           and self.stop_line_wp_idx < farthest_idx)
        if red_light_ahead:
            # Red light ahead: ramp the waypoint velocities down to stop.
            return self.stop_when_red(closest_idx, farthest_idx)
        # No relevant red light: publish the selected waypoints unchanged.
        return self.move_to_next_waypoint(closest_idx, farthest_idx)

    def move_to_next_waypoint(self, closest_idx, farthest_idx):
        """Return the filtered waypoints ahead of the car, velocities untouched."""
        return [self.base_waypoints.waypoints[closest_idx + offset]
                for offset in LOOKAHEAD_FILTER
                if closest_idx + offset < farthest_idx]

    def stop_when_red(self, closest_idx, farthest_idx):
        """Return the filtered waypoints with velocities ramped down to the stop line."""
        # Closest waypoint just before the traffic light's stop line.
        stop_idx = max(self.stop_line_wp_idx - CENTER_TO_LANE_BUFFER, closest_idx)
        target_wp = self.base_waypoints.waypoints[stop_idx]
        tx = target_wp.pose.pose.position.x
        ty = target_wp.pose.pose.position.y

        decelerated = []
        dist = 0.0
        # Walk the selected waypoints from the farthest one towards the car,
        # accumulating distance to the stop line so the commanded velocity
        # decreases smoothly to zero at the light.
        for offset in reversed(LOOKAHEAD_FILTER):
            idx = closest_idx + offset
            if idx >= farthest_idx:
                continue
            wp = self.base_waypoints.waypoints[idx]
            p = Waypoint()
            p.pose = wp.pose
            vel = 0.0
            if idx < stop_idx:
                dist += math.sqrt((tx - wp.pose.pose.position.x) ** 2 +
                                  (ty - wp.pose.pose.position.y) ** 2)
                # Velocity allowed by the maximum deceleration over `dist`.
                vel = math.sqrt(2 * MAX_DECEL * dist)
                if vel < 1.0:
                    vel = 0.0
            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            decelerated.append(p)
        # Processing order was farthest-first; restore ascending order.
        decelerated.reverse()
        return decelerated
def _start_waypoint_updater():
    """Entry point: spin up the node, logging a clean error on interrupt."""
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')


if __name__ == '__main__':
    _start_waypoint_updater()
| [
"rospy.logerr",
"rospy.Subscriber",
"math.sqrt",
"styx_msgs.msg.Lane",
"rospy.Publisher",
"rospy.Rate",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"scipy.spatial.KDTree",
"numpy.dot",
"styx_msgs.msg.Waypoint"
] | [((1158, 1193), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (1173, 1193), False, 'import rospy\n'), ((1438, 1492), 'rospy.Publisher', 'rospy.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1453, 1492), False, 'import rospy\n'), ((1502, 1562), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1518, 1562), False, 'import rospy\n'), ((1571, 1631), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1587, 1631), False, 'import rospy\n'), ((1640, 1701), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1656, 1701), False, 'import rospy\n'), ((1822, 1843), 'rospy.Rate', 'rospy.Rate', (['FREQUENCY'], {}), '(FREQUENCY)\n', (1832, 1843), False, 'import rospy\n'), ((2211, 2217), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (2215, 2217), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((2518, 2630), 'scipy.spatial.KDTree', 'KDTree', (['[[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for\n waypoint in waypoints.waypoints]'], {}), '([[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for\n waypoint in waypoints.waypoints])\n', (2524, 2630), False, 'from scipy.spatial import KDTree\n'), ((3752, 3775), 'numpy.array', 'np.array', (['closest_coord'], {}), '(closest_coord)\n', (3760, 3775), True, 'import numpy as np\n'), ((3796, 3816), 'numpy.array', 'np.array', (['prev_coord'], {}), '(prev_coord)\n', (3804, 3816), True, 'import numpy as np\n'), ((3836, 3852), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (3844, 3852), True, 'import numpy as np\n'), ((3868, 3915), 'numpy.dot', 'np.dot', (['(cl_vect - 
prev_vect)', '(pos_vect - cl_vect)'], {}), '(cl_vect - prev_vect, pos_vect - cl_vect)\n', (3874, 3915), True, 'import numpy as np\n'), ((1862, 1881), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1879, 1881), False, 'import rospy\n'), ((6647, 6701), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (6659, 6701), False, 'import rospy\n'), ((5780, 5790), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (5788, 5790), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((6004, 6148), 'math.sqrt', 'math.sqrt', (['((target_wp.pose.pose.position.x - wp.pose.pose.position.x) ** 2 + (\n target_wp.pose.pose.position.y - wp.pose.pose.position.y) ** 2)'], {}), '((target_wp.pose.pose.position.x - wp.pose.pose.position.x) ** 2 +\n (target_wp.pose.pose.position.y - wp.pose.pose.position.y) ** 2)\n', (6013, 6148), False, 'import math\n'), ((6283, 6314), 'math.sqrt', 'math.sqrt', (['(2 * MAX_DECEL * dist)'], {}), '(2 * MAX_DECEL * dist)\n', (6292, 6314), False, 'import math\n')] |
import cv2
import ctypes
import numpy as np
from ctypes import cdll
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Load the test image and convert it to a single-channel grayscale matrix.
    img_bgr = cv2.imread("test.JPG")
    img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    rows = img_gray.shape[0]
    cols = img_gray.shape[1]

    # Output buffer filled in-place by the native SIFT routine.
    out = np.empty((img_gray.shape[0], img_gray.shape[1]), dtype='float64')
    img_gray = img_gray.astype('float64')  # double precision for the C side

    # SIFT parameters forwarded verbatim to the shared library.
    kr = 16
    ks = 4
    ko = 8
    S = 3
    sigma_init = 0.5
    contrast_threshold = 0.03
    edge_response_threshold = 10.0
    max_iterpolation = 10
    timings = np.empty(4, dtype='float64')

    sift_lib = cdll.LoadLibrary("./lib/siftshare.so")
    sift_lib.sift.argtypes = [
        np.ctypeslib.ndpointer(dtype=img_gray.dtype, ndim=2, shape=img_gray.shape, flags='C_CONTIGUOUS'),
        np.ctypeslib.ndpointer(dtype=out.dtype, ndim=2, shape=out.shape, flags='C_CONTIGUOUS'),
        ctypes.c_int,     # n (rows)
        ctypes.c_int,     # m (cols)
        ctypes.c_int,     # kr
        ctypes.c_int,     # ks
        ctypes.c_int,     # ko
        ctypes.c_int,     # S
        ctypes.c_double,  # sigma_init
        ctypes.c_double,  # contrast_threshold
        ctypes.c_double,  # edge_response_threshold
        ctypes.c_int,     # max_iterpolation
        np.ctypeslib.ndpointer(dtype=timings.dtype, ndim=1, shape=timings.shape),
    ]
    sift_lib.sift(img_gray, out, rows, cols, kr, ks, ko, S,
                  sigma_init, contrast_threshold,
                  edge_response_threshold, max_iterpolation, timings)

    # Render the result; the saving step follows this block.
    plt.imshow(out, cmap='gray')
plt.savefig("res/share.png") | [
"numpy.ctypeslib.ndpointer",
"cv2.cvtColor",
"numpy.empty",
"matplotlib.pyplot.imshow",
"ctypes.cdll.LoadLibrary",
"cv2.imread",
"matplotlib.pyplot.savefig"
] | [((140, 162), 'cv2.imread', 'cv2.imread', (['"""test.JPG"""'], {}), "('test.JPG')\n", (150, 162), False, 'import cv2\n'), ((172, 211), 'cv2.cvtColor', 'cv2.cvtColor', (['mg_cv', 'cv2.COLOR_BGR2GRAY'], {}), '(mg_cv, cv2.COLOR_BGR2GRAY)\n', (184, 211), False, 'import cv2\n'), ((265, 322), 'numpy.empty', 'np.empty', (['(gray.shape[0], gray.shape[1])'], {'dtype': '"""float64"""'}), "((gray.shape[0], gray.shape[1]), dtype='float64')\n", (273, 322), True, 'import numpy as np\n'), ((544, 572), 'numpy.empty', 'np.empty', (['(4)'], {'dtype': '"""float64"""'}), "(4, dtype='float64')\n", (552, 572), True, 'import numpy as np\n'), ((589, 627), 'ctypes.cdll.LoadLibrary', 'cdll.LoadLibrary', (['"""./lib/siftshare.so"""'], {}), "('./lib/siftshare.so')\n", (605, 627), False, 'from ctypes import cdll\n'), ((1732, 1760), 'matplotlib.pyplot.imshow', 'plt.imshow', (['res'], {'cmap': '"""gray"""'}), "(res, cmap='gray')\n", (1742, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1793), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""res/share.png"""'], {}), "('res/share.png')\n", (1776, 1793), True, 'import matplotlib.pyplot as plt\n'), ((654, 747), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'dtype': 'gray.dtype', 'ndim': '(2)', 'shape': 'gray.shape', 'flags': '"""C_CONTIGUOUS"""'}), "(dtype=gray.dtype, ndim=2, shape=gray.shape, flags=\n 'C_CONTIGUOUS')\n", (676, 747), True, 'import numpy as np\n'), ((775, 866), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'dtype': 'res.dtype', 'ndim': '(2)', 'shape': 'res.shape', 'flags': '"""C_CONTIGUOUS"""'}), "(dtype=res.dtype, ndim=2, shape=res.shape, flags=\n 'C_CONTIGUOUS')\n", (797, 866), True, 'import numpy as np\n'), ((1458, 1534), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'dtype': 'time_arr4.dtype', 'ndim': '(1)', 'shape': 'time_arr4.shape'}), '(dtype=time_arr4.dtype, ndim=1, shape=time_arr4.shape)\n', (1480, 1534), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import json
import random
import warnings
from datetime import datetime
from pathlib import Path
import numpy
from PIL import Image, ImageEnhance
from samples.misc.synthesis.mask_json_utilities import MaskJsonUtils
from tqdm import tqdm
class ImageComposition:
""" Composes images together in random ways, applying transformations to the foreground to create a
synthetic
combined image.
"""
def __init__(self):
self.allowed_output_types = [".png", ".jpg", ".jpeg"]
self.allowed_background_types = [".png", ".jpg", ".jpeg"]
self.zero_padding = 8 # 00000027.png, supports up to 100 million images
self.max_foregrounds = 3
self.mask_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
assert (
len(self.mask_colors) >= self.max_foregrounds
), "length of mask_colors should be >= max_foregrounds"
def _validate_and_process_args(self, args):
# Validates input arguments and sets up class variables
# Args:
# args: the ArgumentParser command line arguments
self.silent = args.silent
# Validate the count
assert args.count > 0, "count must be greater than 0"
self.count = args.count
# Validate the width and height
assert args._width >= 64, "width must be greater than 64"
self.width = args._width
assert args._height >= 64, "height must be greater than 64"
self.height = args._height
# Validate and process the output type
if args.output_type is None:
self.output_type = ".jpg" # default
else:
if args.output_type[0] != ".":
self.output_type = f".{args.output_type}"
assert self.output_type in self.allowed_output_types, (
f"output_type is not supported: " f"{self.output_type}"
)
# Validate and process output and input directories
self._validate_and_process_output_directory()
self._validate_and_process_input_directory()
def _validate_and_process_output_directory(self):
self.output_dir = Path(config.output_dir)
self.images_output_dir = self.output_dir / "images"
self.masks_output_dir = self.output_dir / "masks"
# Create directories
self.output_dir.mkdir(exist_ok=True)
self.images_output_dir.mkdir(exist_ok=True)
self.masks_output_dir.mkdir(exist_ok=True)
if not self.silent:
# Check for existing contents in the images directory
for _ in self.images_output_dir.iterdir():
# We found something, check if the user wants to overwrite files or quit
should_continue = input(
"output_dir is not empty, files may be overwritten.\nContinue (y/n)? "
).lower()
if should_continue != "y" and should_continue != "yes":
quit()
break
def _validate_and_process_input_directory(self):
self.input_dir = Path(config.input_dir)
assert self.input_dir.exists(), f"input_dir does not exist: {config.input_dir}"
for x in self.input_dir.iterdir():
if x.name == "foregrounds":
self.foregrounds_dir = x
elif x.name == "backgrounds":
self.backgrounds_dir = x
assert (
self.foregrounds_dir is not None
), "foregrounds sub-directory was not found in the input_dir"
assert (
self.backgrounds_dir is not None
), "backgrounds sub-directory was not found in the input_dir"
self._validate_and_process_foregrounds()
self._validate_and_process_backgrounds()
def _validate_and_process_foregrounds(self):
# Validates input foregrounds and processes them into a foregrounds dictionary.
# Expected directory structure:
# + foregrounds_dir
# + super_category_dir
# + category_dir
# + foreground_image.png
self.foregrounds_dict = dict()
for super_category_dir in self.foregrounds_dir.iterdir():
if not super_category_dir.is_dir():
warnings.warn(
f"file found in foregrounds directory (expected super-category directories), ignoring: "
f"{super_category_dir}"
)
continue
# This is a super category directory
for category_dir in super_category_dir.iterdir():
if not category_dir.is_dir():
warnings.warn(
f"file found in super category directory (expected category directories), ignoring: "
f"{category_dir}"
)
continue
# This is a category directory
for image_file in category_dir.iterdir():
if not image_file.is_file():
warnings.warn(
f"a directory was found inside a category directory, ignoring: {str(image_file)}"
)
continue
if image_file.suffix != ".png":
warnings.warn(
f"foreground must be a .png file, skipping: {str(image_file)}"
)
continue
# Valid foreground image, add to foregrounds_dict
super_category = super_category_dir.name
category = category_dir.name
if super_category not in self.foregrounds_dict:
self.foregrounds_dict[super_category] = dict()
if category not in self.foregrounds_dict[super_category]:
self.foregrounds_dict[super_category][category] = []
self.foregrounds_dict[super_category][category].append(image_file)
assert len(self.foregrounds_dict) > 0, "no valid foregrounds were found"
def _validate_and_process_backgrounds(self):
self.backgrounds = []
for image_file in self.backgrounds_dir.iterdir():
if not image_file.is_file():
warnings.warn(
f"a directory was found inside the backgrounds directory, ignoring: {image_file}"
)
continue
if image_file.suffix not in self.allowed_background_types:
warnings.warn(
f"background must match an accepted type {str(self.allowed_background_types)}, ignoring: "
f"{image_file}"
)
continue
# Valid file, add to backgrounds list
self.backgrounds.append(image_file)
assert len(self.backgrounds) > 0, "no valid backgrounds were found"
    def _generate_images(self):
        """Generate `self.count` composite images plus per-image masks.

        Each iteration picks a random background, composes 1..max_foregrounds
        random foreground cut-outs onto it, saves the composite and its mask,
        and registers both with a MaskJsonUtils instance which finally writes
        mask_definitions.json.
        """
        # Generates a number of images and creates segmentation masks, then
        # saves a mask_definitions.json file that describes the dataset.
        print(f"Generating {self.count} images with masks...")
        mju = MaskJsonUtils(self.output_dir)
        # Create all images/masks (with tqdm to have a progress bar)
        for i in tqdm(range(self.count)):
            # Randomly choose a background
            background_path = random.choice(self.backgrounds)
            num_foregrounds = random.randint(1, self.max_foregrounds)
            foregrounds = []
            for fg_i in range(num_foregrounds):
                # Randomly choose a foreground
                super_category = random.choice(list(self.foregrounds_dict.keys()))
                category = random.choice(
                    list(self.foregrounds_dict[super_category].keys())
                )
                foreground_path = random.choice(
                    self.foregrounds_dict[super_category][category]
                )
                # Get the color (one distinct mask color per foreground index)
                mask_rgb_color = self.mask_colors[fg_i]
                foregrounds.append(
                    {
                        "super_category": super_category,
                        "category": category,
                        "foreground_path": foreground_path,
                        "mask_rgb_color": mask_rgb_color,
                    }
                )
            # Compose foregrounds and background
            composite, mask = self._compose_images(foregrounds, background_path)
            # Create the file name (used for both composite and mask)
            save_filename = f"{i:0{self.zero_padding}}"  # e.g. 00000023.jpg
            # Save composite image to the images sub-directory
            composite_filename = (
                f"{save_filename}{self.output_type}"  # e.g. 00000023.jpg
            )
            composite_path = self.output_dir / "images" / composite_filename  # e.g.
            # my_output_dir/images/00000023.jpg
            composite = composite.convert("RGB")  # remove alpha
            composite.save(composite_path)
            # Save the mask image to the masks sub-directory
            mask_filename = f"{save_filename}.png"  # masks are always png to avoid lossy compression
            mask_path = (
                self.output_dir / "masks" / mask_filename
            )  # e.g. my_output_dir/masks/00000023.png
            mask.save(mask_path)
            color_categories = dict()
            for fg in foregrounds:
                # Add category and color info
                mju.add_category(fg["category"], fg["super_category"])
                color_categories[str(fg["mask_rgb_color"])] = {
                    "category": fg["category"],
                    "super_category": fg["super_category"],
                }
            # Add the mask to MaskJsonUtils (paths are stored relative to output_dir)
            mju.add_mask(
                composite_path.relative_to(self.output_dir).as_posix(),
                mask_path.relative_to(self.output_dir).as_posix(),
                color_categories,
            )
        # Write masks to json
        mju.write_masks_to_json()
    def _compose_images(self, foregrounds, background_path):
        """Compose the given foregrounds onto a random crop of the background.

        Returns a (composite, mask) pair of PIL images, both sized
        self.width x self.height. The mask paints each foreground's pixels
        (alpha > threshold) in that foreground's assigned RGB color.
        """
        # Composes a foreground image and a background image and creates a segmentation mask
        # using the specified color. Validation should already be done by now.
        # Args:
        #     foregrounds: a list of dicts with format:
        #       [{
        #           'super_category':super_category,
        #           'category':category,
        #           'foreground_path':foreground_path,
        #           'mask_rgb_color':mask_rgb_color
        #       },...]
        #     background_path: the path to a valid background image
        # Returns:
        #     composite: the composed image
        #     mask: the mask image
        # Open background and convert to RGBA
        background = Image.open(background_path)
        background = background.convert("RGBA")
        # Crop background to desired size (self.width x self.height), randomly positioned
        bg_width, bg_height = background.size
        max_crop_x_pos = bg_width - self.width
        max_crop_y_pos = bg_height - self.height
        assert max_crop_x_pos >= 0, (
            f"desired width, {self.width}, is greater than background width, "
            f"{bg_width}, for {str(background_path)}"
        )
        assert max_crop_y_pos >= 0, (
            f"desired height, {self.height}, is greater than backgrou"
            f"nd height, {bg_height}, for {str(background_path)}"
        )
        crop_x_pos = random.randint(0, max_crop_x_pos)
        crop_y_pos = random.randint(0, max_crop_y_pos)
        composite = background.crop(
            (crop_x_pos, crop_y_pos, crop_x_pos + self.width, crop_y_pos + self.height)
        )
        composite_mask = Image.new("RGB", composite.size, 0)
        for fg in foregrounds:
            fg_path = fg["foreground_path"]
            # Perform transformations (random rotation / scale / brightness)
            fg_image = self._transform_foreground(fg, fg_path)
            # Choose a random x,y position for the foreground
            max_x_position = composite.size[0] - fg_image.size[0]
            max_y_position = composite.size[1] - fg_image.size[1]
            assert max_x_position >= 0 and max_y_position >= 0, (
                f"foreground {fg_path} is too big ({fg_image.size[0]}x{fg_image.size[1]}) for the requeste"
                f"d output size ({self.width}x{self.height}), check your input parameters"
            )
            paste_position = (
                random.randint(0, max_x_position),
                random.randint(0, max_y_position),
            )
            # Create a new foreground image as large as the composite and paste it on top
            new_fg_image = Image.new("RGBA", composite.size, color=(0, 0, 0, 0))
            new_fg_image.paste(fg_image, paste_position)
            # Extract the alpha channel from the foreground and paste it into a new image the size of the composite
            alpha_mask = fg_image.getchannel(3)
            new_alpha_mask = Image.new("L", composite.size, color=0)
            new_alpha_mask.paste(alpha_mask, paste_position)
            composite = Image.composite(new_fg_image, composite, new_alpha_mask)
            # Grab the alpha pixels above a specified threshold
            alpha_threshold = 200
            mask_arr = numpy.array(
                numpy.greater(numpy.array(new_alpha_mask), alpha_threshold), dtype=numpy.uint8
            )
            uint8_mask = numpy.uint8(mask_arr)  # This is composed of 1s and 0s
            # Multiply the mask value (1 or 0) by the color in each RGB channel and combine to get the mask
            mask_rgb_color = fg["mask_rgb_color"]
            red_channel = uint8_mask * mask_rgb_color[0]
            green_channel = uint8_mask * mask_rgb_color[1]
            blue_channel = uint8_mask * mask_rgb_color[2]
            rgb_mask_arr = numpy.dstack((red_channel, green_channel, blue_channel))
            isolated_mask = Image.fromarray(rgb_mask_arr, "RGB")
            isolated_alpha = Image.fromarray(uint8_mask * 255, "L")
            # Later foregrounds overwrite earlier mask pixels where they overlap
            composite_mask = Image.composite(
                isolated_mask, composite_mask, isolated_alpha
            )
        return composite, composite_mask
def _transform_foreground(self, fg, fg_path):
# Open foreground and get the alpha channel
fg_image = Image.open(fg_path)
fg_alpha = numpy.array(fg_image.getchannel(3))
assert numpy.any(
fg_alpha == 0
), f"foreground needs to have some transparency: {str(fg_path)}"
# ** Apply Transformations **
# Rotate the foreground
angle_degrees = random.randint(0, 359)
fg_image = fg_image.rotate(angle_degrees, resample=Image.BICUBIC, expand=True)
# Scale the foreground
scale = random.random() * 0.5 + 0.5 # Pick something between .5 and 1
new_size = (int(fg_image.size[0] * scale), int(fg_image.size[1] * scale))
fg_image = fg_image.resize(new_size, resample=Image.BICUBIC)
# Adjust foreground brightness
brightness_factor = (
random.random() * 0.4 + 0.7
) # Pick something between .7 and 1.1
enhancer = ImageEnhance.Brightness(fg_image)
fg_image = enhancer.enhance(brightness_factor)
# Add any other transformations here...
return fg_image
def _create_info(self):
# A convenience wizard for automatically creating dataset info
# The user can always modify the resulting .json manually if needed
if self.silent:
# No user wizard in silent mode
return
should_continue = input(
"Would you like to create dataset info json? (y/n) "
).lower()
if should_continue != "y" and should_continue != "yes":
print("No problem. You can always create the json manually.")
quit()
print(
"Note: you can always modify the json manually if you need to update this."
)
info = dict()
info["description"] = input("Description: ")
info["url"] = input("URL: ")
info["version"] = input("Version: ")
info["contributor"] = input("Contributor: ")
now = datetime.now()
info["year"] = now.year
info["date_created"] = f"{now.month:0{2}}/{now.day:0{2}}/{now.year}"
image_license = dict()
image_license["id"] = 0
should_add_license = input("Add an image license? (y/n) ").lower()
if should_add_license != "y" and should_add_license != "yes":
image_license["url"] = ""
image_license["name"] = "None"
else:
image_license["name"] = input("License name: ")
image_license["url"] = input("License URL: ")
dataset_info = dict()
dataset_info["info"] = info
dataset_info["license"] = image_license
# Write the JSON output file
output_file_path = Path(self.output_dir) / "dataset_info.json"
with open(output_file_path, "w+") as json_file:
json_file.write(json.dumps(dataset_info))
print("Successfully created {output_file_path}")
    def __call__(self, args):
        """Run the full pipeline: validate args, generate images/masks, then the info wizard."""
        self._validate_and_process_args(args)
        self._generate_images()
        self._create_info()
        print("Image composition completed.")
if __name__ == "__main__":
    import argparse

    # Build the CLI: required input/output/count/width/height, optional
    # output type and silent flag, then run the composition end-to-end.
    parser = argparse.ArgumentParser(description="Image Composition")
    parser.add_argument(
        "--input_dir",
        type=str,
        dest="input_dir",
        required=True,
        help="The input directory. \
            This contains a 'backgrounds' directory of pngs or jpgs, and a 'foregrounds' "
        "directory which \
            contains supercategory directories (e.g. 'animal', 'vehicle'), each of which "
        "contain category \
            directories (e.g. 'horse', 'bear'). Each category directory contains png images of "
        "that item on a \
            transparent background (e.g. a grizzly bear on a transparent background).",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        dest="output_dir",
        required=True,
        help="The directory where "
        "images, masks, \
            and json files will be placed",
    )
    parser.add_argument(
        "--count",
        type=int,
        dest="count",
        required=True,
        help="number of composed images to create",
    )
    parser.add_argument(
        "--width",
        type=int,
        dest="width",
        required=True,
        help="output image pixel width",
    )
    parser.add_argument(
        "--height",
        type=int,
        dest="height",
        required=True,
        help="output image pixel height",
    )
    parser.add_argument(
        "--output_type", type=str, dest="output_type", help="png or jpg (default)"
    )
    parser.add_argument(
        "--silent",
        action="store_true",
        help="silent mode; doesn't prompt the user for input, \
            automatically overwrites files",
    )
    config = parser.parse_args()
    # Instantiate the generator and run it with the parsed arguments
    image_comp = ImageComposition()
    image_comp(config)
| [
"numpy.dstack",
"PIL.Image.new",
"PIL.ImageEnhance.Brightness",
"numpy.uint8",
"random.randint",
"argparse.ArgumentParser",
"PIL.Image.composite",
"random.choice",
"json.dumps",
"PIL.Image.open",
"numpy.any",
"random.random",
"pathlib.Path",
"numpy.array",
"PIL.Image.fromarray",
"sampl... | [((17523, 17579), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Image Composition"""'}), "(description='Image Composition')\n", (17546, 17579), False, 'import argparse\n'), ((2144, 2167), 'pathlib.Path', 'Path', (['config.output_dir'], {}), '(config.output_dir)\n', (2148, 2167), False, 'from pathlib import Path\n'), ((3061, 3083), 'pathlib.Path', 'Path', (['config.input_dir'], {}), '(config.input_dir)\n', (3065, 3083), False, 'from pathlib import Path\n'), ((7178, 7208), 'samples.misc.synthesis.mask_json_utilities.MaskJsonUtils', 'MaskJsonUtils', (['self.output_dir'], {}), '(self.output_dir)\n', (7191, 7208), False, 'from samples.misc.synthesis.mask_json_utilities import MaskJsonUtils\n'), ((10910, 10937), 'PIL.Image.open', 'Image.open', (['background_path'], {}), '(background_path)\n', (10920, 10937), False, 'from PIL import Image, ImageEnhance\n'), ((11606, 11639), 'random.randint', 'random.randint', (['(0)', 'max_crop_x_pos'], {}), '(0, max_crop_x_pos)\n', (11620, 11639), False, 'import random\n'), ((11661, 11694), 'random.randint', 'random.randint', (['(0)', 'max_crop_y_pos'], {}), '(0, max_crop_y_pos)\n', (11675, 11694), False, 'import random\n'), ((11855, 11890), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'composite.size', '(0)'], {}), "('RGB', composite.size, 0)\n", (11864, 11890), False, 'from PIL import Image, ImageEnhance\n'), ((14456, 14475), 'PIL.Image.open', 'Image.open', (['fg_path'], {}), '(fg_path)\n', (14466, 14475), False, 'from PIL import Image, ImageEnhance\n'), ((14546, 14570), 'numpy.any', 'numpy.any', (['(fg_alpha == 0)'], {}), '(fg_alpha == 0)\n', (14555, 14570), False, 'import numpy\n'), ((14751, 14773), 'random.randint', 'random.randint', (['(0)', '(359)'], {}), '(0, 359)\n', (14765, 14773), False, 'import random\n'), ((15299, 15332), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['fg_image'], {}), '(fg_image)\n', (15322, 15332), False, 'from PIL import Image, ImageEnhance\n'), 
((16338, 16352), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16350, 16352), False, 'from datetime import datetime\n'), ((7394, 7425), 'random.choice', 'random.choice', (['self.backgrounds'], {}), '(self.backgrounds)\n', (7407, 7425), False, 'import random\n'), ((7457, 7496), 'random.randint', 'random.randint', (['(1)', 'self.max_foregrounds'], {}), '(1, self.max_foregrounds)\n', (7471, 7496), False, 'import random\n'), ((12808, 12861), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'composite.size'], {'color': '(0, 0, 0, 0)'}), "('RGBA', composite.size, color=(0, 0, 0, 0))\n", (12817, 12861), False, 'from PIL import Image, ImageEnhance\n'), ((13113, 13152), 'PIL.Image.new', 'Image.new', (['"""L"""', 'composite.size'], {'color': '(0)'}), "('L', composite.size, color=0)\n", (13122, 13152), False, 'from PIL import Image, ImageEnhance\n'), ((13238, 13294), 'PIL.Image.composite', 'Image.composite', (['new_fg_image', 'composite', 'new_alpha_mask'], {}), '(new_fg_image, composite, new_alpha_mask)\n', (13253, 13294), False, 'from PIL import Image, ImageEnhance\n'), ((13564, 13585), 'numpy.uint8', 'numpy.uint8', (['mask_arr'], {}), '(mask_arr)\n', (13575, 13585), False, 'import numpy\n'), ((13979, 14035), 'numpy.dstack', 'numpy.dstack', (['(red_channel, green_channel, blue_channel)'], {}), '((red_channel, green_channel, blue_channel))\n', (13991, 14035), False, 'import numpy\n'), ((14064, 14100), 'PIL.Image.fromarray', 'Image.fromarray', (['rgb_mask_arr', '"""RGB"""'], {}), "(rgb_mask_arr, 'RGB')\n", (14079, 14100), False, 'from PIL import Image, ImageEnhance\n'), ((14130, 14168), 'PIL.Image.fromarray', 'Image.fromarray', (['(uint8_mask * 255)', '"""L"""'], {}), "(uint8_mask * 255, 'L')\n", (14145, 14168), False, 'from PIL import Image, ImageEnhance\n'), ((14199, 14261), 'PIL.Image.composite', 'Image.composite', (['isolated_mask', 'composite_mask', 'isolated_alpha'], {}), '(isolated_mask, composite_mask, isolated_alpha)\n', (14214, 14261), False, 'from PIL 
import Image, ImageEnhance\n'), ((17065, 17086), 'pathlib.Path', 'Path', (['self.output_dir'], {}), '(self.output_dir)\n', (17069, 17086), False, 'from pathlib import Path\n'), ((4234, 4367), 'warnings.warn', 'warnings.warn', (['f"""file found in foregrounds directory (expected super-category directories), ignoring: {super_category_dir}"""'], {}), "(\n f'file found in foregrounds directory (expected super-category directories), ignoring: {super_category_dir}'\n )\n", (4247, 4367), False, 'import warnings\n'), ((6288, 6394), 'warnings.warn', 'warnings.warn', (['f"""a directory was found inside the backgrounds directory, ignoring: {image_file}"""'], {}), "(\n f'a directory was found inside the backgrounds directory, ignoring: {image_file}'\n )\n", (6301, 6394), False, 'import warnings\n'), ((7869, 7931), 'random.choice', 'random.choice', (['self.foregrounds_dict[super_category][category]'], {}), '(self.foregrounds_dict[super_category][category])\n', (7882, 7931), False, 'import random\n'), ((12590, 12623), 'random.randint', 'random.randint', (['(0)', 'max_x_position'], {}), '(0, max_x_position)\n', (12604, 12623), False, 'import random\n'), ((12641, 12674), 'random.randint', 'random.randint', (['(0)', 'max_y_position'], {}), '(0, max_y_position)\n', (12655, 12674), False, 'import random\n'), ((14909, 14924), 'random.random', 'random.random', ([], {}), '()\n', (14922, 14924), False, 'import random\n'), ((15205, 15220), 'random.random', 'random.random', ([], {}), '()\n', (15218, 15220), False, 'import random\n'), ((17193, 17217), 'json.dumps', 'json.dumps', (['dataset_info'], {}), '(dataset_info)\n', (17203, 17217), False, 'import json\n'), ((4623, 4747), 'warnings.warn', 'warnings.warn', (['f"""file found in super category directory (expected category directories), ignoring: {category_dir}"""'], {}), "(\n f'file found in super category directory (expected category directories), ignoring: {category_dir}'\n )\n", (4636, 4747), False, 'import warnings\n'), ((13460, 
13487), 'numpy.array', 'numpy.array', (['new_alpha_mask'], {}), '(new_alpha_mask)\n', (13471, 13487), False, 'import numpy\n')] |
import trimesh
import numpy as np
import copy
from pychop3d.configuration import Configuration
from pychop3d import bsp_node
class BSPTree:
    """Binary space partitioning tree of a part.

    The root node holds the original mesh; splitting a node adds children
    until every leaf fits in the printer. The tree also tracks the components
    of the weighted objective function used to rank candidate trees.
    """

    def __init__(self, part):
        """start a new BSPTree from a single part / object

        :param part: original part / object to split
        :type part: `trimesh.Trimesh`
        """
        config = Configuration.config  # collect configuration
        self.nodes = [bsp_node.BSPNode(part)]  # create root node and the list of nodes

        # calculate initial nparts objective
        nparts = 1  # nparts = sum([l.n_parts for l in self.leaves]) / nparts_original --> measures part reduction

        # calculate initial utilization objective --> measures how much of the parts fill their oriented bounding boxes
        V = np.prod(config.printer_extents)
        if config.obb_utilization:
            utilization = 1 - self.nodes[0].obb.volume / (self.nodes[0].n_parts * V)
        else:
            utilization = 1 - self.nodes[0].part.volume / (self.nodes[0].n_parts * V)

        # create objectives dictionary
        self.objectives = {
            'nparts': nparts,
            'utilization': utilization,
            'connector': 0,  # no connectors yet
            'fragility': 0,
            'seam': 0,
            'symmetry': 0
        }

    def copy(self):
        """copy function in case I ever want to do something more complicated or specific than deepcopy

        :return: copy of this tree
        :rtype: `BSPTree`
        """
        new_tree = copy.deepcopy(self)
        return new_tree

    def get_node(self, path=None):
        """get node with path `path`, if no `path` is provided, get the root node. Paths are specified as follows:
            - empty tuple ( ) is the root node
            - every number in the tuple specified which of the nodes at each level of the tree to select, for example
                (0, 1) means the 1st child of the 0th child of the root node

        :param path: tuple specifying the path to the node.
        :type path: tuple
        :return: the node at path `path`
        :rtype: `bsp_node.BSPNode`
        """
        node = self.nodes[0]  # get root node
        if not path:  # if no path specified, return the root node
            return node
        else:
            for i in path:  # descend the tree according to the path
                node = node.children[i]  # take the ith child at each tree level where i is in `path`
            return node

    @property
    def leaves(self):
        """property containing the leaves of the tree. The leaves of the final BSP tree correspond to parts
        small enough to fit in the printer

        :return: list of the leaves of this tree
        :rtype: list of `bsp_node.BSPNode`
        """
        nodes = [self.nodes[0]]  # collect root node, start list of nodes to check for leaves
        leaves = []  # start list of leaves
        while nodes:
            node = nodes.pop()  # take node out of list to check
            if len(node.children) == 0:  # check if the node has no children (this means it is a leaf)
                leaves.append(node)  # if so, add to list of leaves
            else:
                nodes += node.children  # otherwise add children to list of nodes to check
        return leaves

    @property
    def terminated(self):
        """property indicating whether all of this tree's nodes are small enough to be printed

        :return: terminated
        :rtype: bool
        """
        # if any of the leaves are not terminated, the tree is not terminated
        for leaf in self.leaves:
            if not leaf.terminated:
                return False
        return True

    @property
    def largest_part(self):
        """property pointing to this trees largest part by number of parts
        """
        # sort leaves by n_parts, give the last (highest) one
        return sorted(self.leaves, key=lambda x: x.n_parts)[-1]

    def different_from(self, tree, node):
        """determine if a node in this tree is different enough from a node in a different tree. The two trees will
        be the same tree except one of the nodes will have a different cutting plane. This function checks if that
        cutting plane is different from the corresponding cutting plane on the other tree. Two cutting planes are
        considered different if their relative translation or rotation passes the corresponding thresholds.

        :param tree: other tree to consider
        :type tree: `bsp_tree.BSPTree`
        :param node: which node on this tree to compare with the corresponding node on the other tree
        :type node: `bsp_node.BSPNode`
        :return: boolean indicating if the specified node is different between the trees
        :rtype: bool
        """
        self_node = self.get_node(node.path)  # get the node on this tree
        other_node = tree.get_node(node.path)  # get the corresponding node on the other tree
        # check if the node on this tree is different from the node on the other tree
        return self_node.different_from(other_node)

    def sufficiently_different(self, node, tree_set):
        """same as `bsp_tree.BSPTree.different_from` except this tree is compared to a list of other trees instead of
        just one.

        :param node: which node on this tree to compare with the corresponding node on the other trees
        :type node: `bsp_node.BSPNode`
        :param tree_set: list of other trees to consider
        :type tree_set: list of `bsp_tree.BSPTree`
        :return: boolean indicating if the specified node is different between this tree and all the other trees
        :rtype: bool
        """
        if not tree_set:  # if the tree set is empty, then this tree is unique
            return True
        for tree in tree_set:
            # bug fix: `different_from` was previously invoked a second redundant time
            # inside this branch with its result discarded; one call per tree suffices
            if not self.different_from(tree, node):
                return False
        return True

    @property
    def objective(self):
        """calculates the weighted sum of the objective function components

        :return: value of the objective function for this tree
        :rtype: float
        """
        config = Configuration.config
        part = config.objective_weights['part'] * self.objectives['nparts']
        util = config.objective_weights['utilization'] * self.objectives['utilization']
        connector = config.objective_weights['connector'] * self.objectives['connector']
        fragility = config.objective_weights['fragility'] * self.objectives['fragility']
        seam = config.objective_weights['seam'] * self.objectives['seam']
        symmetry = config.objective_weights['symmetry'] * self.objectives['symmetry']
        return part + util + connector + fragility + seam + symmetry
def expand_node(tree, path, plane):
    """Split the node at `path` of `tree` with `plane`, returning a new tree.

    The input tree is never mutated: a deep copy is made and the split is
    applied to the copy.

    :param tree: tree to split
    :type tree: `bsp_tree.BSPTree`
    :param path: path pointing to the node to split
    :type path: tuple
    :param plane: splitting plane (origin, normal)
    :type plane: tuple of (3, ) shape `numpy.ndarray`
    :return: (split tree or None, result string)
    :rtype: (`bsp_tree.BSPTree`, str)
    """
    candidate = tree.copy()  # work on a copy so the caller's tree stays intact
    target = candidate.get_node(path)
    target, outcome = bsp_node.split(target, plane)
    if outcome == 'success':
        candidate.nodes += target.children  # register the freshly created children
        return candidate, outcome
    # the split failed; `target` may be None, so the candidate tree is discarded
    return None, outcome
def get_planes(part, normal):
    """Enumerate candidate cutting planes of `part` along direction `normal`.

    Planes are returned as (origin, normal) pairs spaced by the configured
    `plane_spacing`, starting one spacing above the object's lower extent.
    Optionally a plane through the middle of the extent is appended.

    :param part: object to determine valid cutting planes for
    :type part: `trimesh.Trimesh`
    :param normal: unit vector defining the normal vector for the planes
    :type normal: (3, ) shape `numpy.ndarray`
    :return: list of all valid cutting planes for the input object
    :rtype: list
    """
    config = Configuration.config  # collect configuration
    # height of every vertex along the cutting direction
    heights = part.vertices @ normal
    lo, hi = heights.min(), heights.max()
    # skip the first offset so no plane sits exactly at the lower extent
    offsets = np.arange(lo, hi, config.plane_spacing)[1:]
    planes = [(d * normal, normal) for d in offsets]
    if config.add_middle_plane:  # add a plane through the middle
        planes.append((normal * (lo + hi) / 2, normal))
    return planes
| [
"copy.deepcopy",
"pychop3d.bsp_node.BSPNode",
"pychop3d.bsp_node.split",
"numpy.arange",
"numpy.prod"
] | [((7515, 7546), 'pychop3d.bsp_node.split', 'bsp_node.split', (['new_node', 'plane'], {}), '(new_node, plane)\n', (7529, 7546), False, 'from pychop3d import bsp_node\n'), ((780, 811), 'numpy.prod', 'np.prod', (['config.printer_extents'], {}), '(config.printer_extents)\n', (787, 811), True, 'import numpy as np\n'), ((1524, 1543), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (1537, 1543), False, 'import copy\n'), ((422, 444), 'pychop3d.bsp_node.BSPNode', 'bsp_node.BSPNode', (['part'], {}), '(part)\n', (438, 444), False, 'from pychop3d import bsp_node\n'), ((8811, 8864), 'numpy.arange', 'np.arange', (['limits[0]', 'limits[1]', 'config.plane_spacing'], {}), '(limits[0], limits[1], config.plane_spacing)\n', (8820, 8864), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec import Wav2VecModel
def read_audio(fname):
    """Read PCM samples from `fname`; the file must be sampled at 16 kHz."""
    samples, rate = sf.read(fname)
    assert rate == 16e3
    return samples, 16e3
class PretrainedWav2VecModel(nn.Module):
    """Wrap a wav2vec checkpoint for inference-only feature extraction."""

    def __init__(self, fname):
        super().__init__()

        state = torch.load(fname)
        self.args = state["args"]
        net = Wav2VecModel.build_model(self.args, None)
        net.load_state_dict(state["model"])
        net.eval()  # inference only; gradients are never needed

        self.model = net

    def forward(self, x):
        """Return (feature vectors z, context vectors c) for a waveform batch."""
        with torch.no_grad():
            feats = self.model.feature_extractor(x)
            if isinstance(feats, tuple):
                feats = feats[0]
            context = self.model.feature_aggregator(feats)
        return feats, context
class EmbeddingWriterConfig(argparse.ArgumentParser):
    """Command-line interface for the embedding pre-computation script."""

    def __init__(self):
        super().__init__("Pre-compute embeddings for wav2letter++ datasets")

        # Shared keyword arguments for the mandatory string options
        required = dict(action="store", type=str, required=True)

        self.add_argument("--input", "-i", help="Input Directory", **required)
        self.add_argument("--output", "-o", help="Output Directory", **required)
        self.add_argument("--model", help="Path to model checkpoint", **required)
        self.add_argument("--split", help="Dataset Splits", nargs="+", **required)
        self.add_argument(
            "--ext", default="wav", required=False, help="Audio file extension"
        )

        self.add_argument(
            "--no-copy-labels",
            action="store_true",
            help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.",
        )
        self.add_argument(
            "--use-feat",
            action="store_true",
            help="Use the feature vector ('z') instead of context vector ('c') for features",
        )
        self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction:
    """Lightweight wrapper around a fairspeech embedding model"""

    def __init__(self, fname, gpu=0):
        self.gpu = gpu
        self.model = PretrainedWav2VecModel(fname).cuda(gpu)

    def __call__(self, x):
        """Map a 1-D numpy waveform to (z, c) numpy embedding matrices."""
        # Add the batch dimension up front and move the tensor to the GPU
        batch = torch.from_numpy(x).float().cuda(self.gpu).unsqueeze(0)
        with torch.no_grad():
            z, c = self.model(batch)
        return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer:
    """Write features as hdf5 file in wav2letter++ compatible format"""

    def __init__(self, fname):
        self.fname = fname
        # Make sure the target directory exists before write() is called
        os.makedirs(os.path.dirname(self.fname), exist_ok=True)

    def write(self, data):
        """Store a (channel, T) feature matrix as 'features' plus an 'info' record."""
        n_channels, n_frames = data.shape
        # wav2letter++ expects time-major, flattened feature values
        flat = data.T.flatten()
        with h5py.File(self.fname, "w") as out_ds:
            out_ds["features"] = flat
            out_ds["info"] = np.array([16e3 // 160, n_frames, n_channels])
class EmbeddingDatasetWriter(object):
    """Given a model and a wav2letter++ dataset, pre-compute and store embeddings

    Args:
        input_root, str :
            Path to the wav2letter++ dataset
        output_root, str :
            Desired output directory. Will be created if non-existent
        split, str :
            Dataset split
    """

    def __init__(
        self,
        input_root,
        output_root,
        split,
        model_fname,
        extension="wav",
        gpu=0,
        verbose=False,
        use_feat=False,
    ):

        assert os.path.exists(model_fname)

        self.model_fname = model_fname
        self.model = Prediction(self.model_fname, gpu)

        self.input_root = input_root
        self.output_root = output_root
        self.split = split
        self.verbose = verbose
        self.extension = extension
        self.use_feat = use_feat

        assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(
            self.input_path
        )

    def _progress(self, iterable, **kwargs):
        """Wrap `iterable` in a tqdm progress bar only when verbose."""
        if self.verbose:
            return tqdm.tqdm(iterable, **kwargs)
        return iterable

    def require_output_path(self, fname=None):
        """Create the output directory (and parents) if it does not exist yet."""
        path = self.get_output_path(fname)
        os.makedirs(path, exist_ok=True)

    @property
    def input_path(self):
        # <input_root>/<split>
        return self.get_input_path()

    @property
    def output_path(self):
        # <output_root>/<split>
        return self.get_output_path()

    def get_input_path(self, fname=None):
        """Return the split's input directory, or the path of `fname` inside it."""
        if fname is None:
            return os.path.join(self.input_root, self.split)
        return os.path.join(self.get_input_path(), fname)

    def get_output_path(self, fname=None):
        """Return the split's output directory, or the path of `fname` inside it."""
        if fname is None:
            return os.path.join(self.output_root, self.split)
        return os.path.join(self.get_output_path(), fname)

    def copy_labels(self):
        """Copy every non-audio file of the split into the output directory."""
        self.require_output_path()

        # Anything whose name does not contain the audio extension is a label file
        labels = list(
            filter(
                lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
            )
        )
        for fname in tqdm.tqdm(labels):
            copy(fname, self.output_path)

    @property
    def input_fnames(self):
        # Sorted audio files of this split, e.g. *.wav
        return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))

    def __len__(self):
        return len(self.input_fnames)

    def write_features(self):
        """Run the model over every audio file and save one .h5context per file."""
        paths = self.input_fnames

        # Output file names mirror the inputs with the audio extension replaced
        fnames_context = map(
            lambda x: os.path.join(
                self.output_path, x.replace("." + self.extension, ".h5context")
            ),
            map(os.path.basename, paths),
        )

        for name, target_fname in self._progress(
            zip(paths, fnames_context), total=len(self)
        ):
            wav, sr = read_audio(name)
            z, c = self.model(wav)
            # use_feat selects the feature vectors ('z') over the context vectors ('c')
            feat = z if self.use_feat else c
            writer = H5Writer(target_fname)
            writer.write(feat)

    def __repr__(self):
        # NOTE: the format call relies on instance attribute names via **self.__dict__
        return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
            n_files=len(self), **self.__dict__
        )
if __name__ == "__main__":
    # Parse the CLI, then pre-compute embeddings for each requested split
    cli_args = EmbeddingWriterConfig().parse_args()

    for split in cli_args.split:
        writer = EmbeddingDatasetWriter(
            input_root=cli_args.input,
            output_root=cli_args.output,
            split=split,
            model_fname=cli_args.model,
            gpu=cli_args.gpu,
            extension=cli_args.ext,
            use_feat=cli_args.use_feat,
        )

        print(writer)
        writer.require_output_path()

        print("Writing Features...")
        writer.write_features()
        print("Done.")

        if not cli_args.no_copy_labels:
            print("Copying label data...")
            writer.copy_labels()
            print("Done.")
| [
"tqdm.tqdm",
"soundfile.read",
"h5py.File",
"os.makedirs",
"fairseq.models.wav2vec.Wav2VecModel.build_model",
"torch.load",
"os.path.dirname",
"os.path.exists",
"numpy.array",
"torch.no_grad",
"os.path.join",
"shutil.copy",
"torch.from_numpy"
] | [((600, 614), 'soundfile.read', 'sf.read', (['fname'], {}), '(fname)\n', (607, 614), True, 'import soundfile as sf\n'), ((782, 799), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (792, 799), False, 'import torch\n'), ((855, 896), 'fairseq.models.wav2vec.Wav2VecModel.build_model', 'Wav2VecModel.build_model', (['self.args', 'None'], {}), '(self.args, None)\n', (879, 896), False, 'from fairseq.models.wav2vec import Wav2VecModel\n'), ((3849, 3876), 'os.path.exists', 'os.path.exists', (['model_fname'], {}), '(model_fname)\n', (3863, 3876), False, 'import os\n'), ((4191, 4222), 'os.path.exists', 'os.path.exists', (['self.input_path'], {}), '(self.input_path)\n', (4205, 4222), False, 'import os\n'), ((4546, 4578), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (4557, 4578), False, 'import os\n'), ((5355, 5372), 'tqdm.tqdm', 'tqdm.tqdm', (['labels'], {}), '(labels)\n', (5364, 5372), False, 'import tqdm\n'), ((1037, 1052), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1050, 1052), False, 'import torch\n'), ((2675, 2690), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2688, 2690), False, 'import torch\n'), ((2978, 3005), 'os.path.dirname', 'os.path.dirname', (['self.fname'], {}), '(self.fname)\n', (2993, 3005), False, 'import os\n'), ((3096, 3122), 'h5py.File', 'h5py.File', (['self.fname', '"""w"""'], {}), "(self.fname, 'w')\n", (3105, 3122), False, 'import h5py\n'), ((3237, 3275), 'numpy.array', 'np.array', (['[16000.0 // 160, T, channel]'], {}), '([16000.0 // 160, T, channel])\n', (3245, 3275), True, 'import numpy as np\n'), ((4393, 4422), 'tqdm.tqdm', 'tqdm.tqdm', (['iterable'], {}), '(iterable, **kwargs)\n', (4402, 4422), False, 'import tqdm\n'), ((4825, 4866), 'os.path.join', 'os.path.join', (['self.input_root', 'self.split'], {}), '(self.input_root, self.split)\n', (4837, 4866), False, 'import os\n'), ((5014, 5056), 'os.path.join', 'os.path.join', (['self.output_root', 'self.split'], {}), 
'(self.output_root, self.split)\n', (5026, 5056), False, 'import os\n'), ((5386, 5415), 'shutil.copy', 'copy', (['fname', 'self.output_path'], {}), '(fname, self.output_path)\n', (5390, 5415), False, 'from shutil import copy\n'), ((2619, 2638), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2635, 2638), False, 'import torch\n')] |
import ctypes
from collections import OrderedDict
from collections.abc import Iterable
from functools import reduce
from itertools import chain, combinations, groupby, product, zip_longest
from operator import attrgetter, mul
import types
import numpy as np
import sympy
from cgen import dtype_to_ctype as cgen_dtype_to_ctype
__all__ = ['prod', 'as_tuple', 'is_integer', 'generator', 'grouper', 'split', 'roundm',
'powerset', 'invert', 'flatten', 'single_or', 'filter_ordered', 'as_mapper',
'filter_sorted', 'dtype_to_cstr', 'dtype_to_ctype', 'dtype_to_mpitype',
'ctypes_to_cstr', 'ctypes_pointer', 'pprint', 'sweep', 'all_equal', 'as_list']
def prod(iterable, initial=1):
    """Multiply together all items of ``iterable``, starting from ``initial``."""
    result = initial
    for factor in iterable:
        result = result * factor
    return result
def as_list(item, type=None, length=None):
    """
    Force item to a list.
    """
    # Delegate the normalisation rules to `as_tuple`, then convert.
    as_tup = as_tuple(item, type=type, length=length)
    return list(as_tup)
def as_tuple(item, type=None, length=None):
    """
    Force item to a tuple.

    Partly extracted from: https://github.com/OP2/PyOP2/.
    """
    if item is None:
        # None maps to the empty tuple
        t = ()
    elif isinstance(item, (str, sympy.Function)):
        # Strings and sympy Functions are iterable, but treated as atoms here
        t = (item,)
    else:
        try:
            # Convert iterable to tuple...
            t = tuple(item)
        except (TypeError, NotImplementedError):
            # ... or replicate the single (non-iterable) item
            t = (item,) * (length or 1)
    if length and len(t) != length:
        raise ValueError("Tuple needs to be of length %d" % length)
    if type and any(not isinstance(i, type) for i in t):
        raise TypeError("Items need to be of type %s" % type)
    return t
def as_mapper(iterable, key=None, get=None):
    """
    Rearrange an iterable into a dictionary of lists in which keys are
    produced by the function ``key``.
    """
    identity = lambda i: i
    keyfunc = key or identity
    getfunc = get or identity
    mapper = {}
    for element in iterable:
        bucket = mapper.setdefault(keyfunc(element), [])
        bucket.append(getfunc(element))
    return mapper
def is_integer(value):
    """
    A thorough instance comparison for all integer types.
    """
    # Covers plain ints, numpy integer scalars and sympy Integers alike.
    integer_types = (int, np.integer, sympy.Integer)
    return isinstance(value, integer_types)
def generator():
    """
    Return a function ``f`` that generates integer numbers starting at 0
    with stepping 1.
    """
    def f():
        # The running count lives as an attribute on the closure itself
        value = f.counter
        f.counter = value + 1
        return value
    f.counter = 0
    return f
def grouper(iterable, n):
    """Split an iterable into groups of size n, plus a remainder.

    Parameters
    ----------
    iterable : iterable
        The elements to partition.
    n : int
        Group size; the final group may be shorter.

    Returns
    -------
    generator of lists
        The consecutive groups, in order.
    """
    # Use a private sentinel (rather than None) as the zip_longest fill
    # value, so that legitimate None elements in the input are preserved.
    fill = object()
    args = [iter(iterable)] * n
    return ([e for e in t if e is not fill] for t in zip_longest(*args, fillvalue=fill))
def all_equal(iterable):
    "Returns True if all the elements are equal to each other"
    # groupby collapses runs of equal elements: one group (or none, for an
    # empty iterable) means all elements compared equal.
    grouped = groupby(iterable)
    first = next(grouped, True)
    second = next(grouped, False)
    return bool(first) and not second
def split(iterable, f):
    """Split an iterable ``I`` into two iterables ``I1`` and ``I2`` of the
    same type as ``I``. ``I1`` contains all elements ``e`` in ``I`` for
    which ``f(e)`` returns True; ``I2`` is the complement of ``I1``."""
    container = type(iterable)
    selected = container(i for i in iterable if f(i))
    rejected = container(i for i in iterable if not f(i))
    return selected, rejected
def roundm(x, y):
    """Return x rounded up to the closest multiple of y."""
    remainder = x % y
    if remainder == 0:
        return x
    return x + y - remainder
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    items = list(iterable)
    # All subsets, ordered by size: size 0, then 1, ..., up to len(items)
    subsets_by_size = (combinations(items, size) for size in range(len(items) + 1))
    return chain.from_iterable(subsets_by_size)
def invert(mapper):
    """Invert a dict of lists preserving the order."""
    inverse = OrderedDict()
    for key, values in mapper.items():
        # Later keys win when the same value appears under several keys
        inverse.update((v, key) for v in values)
    return inverse
def flatten(l):
    """Flatten a hierarchy of nested lists into a plain list."""
    # Strings, bytes and ndarrays are iterable but treated as atoms
    atomic = (str, bytes, np.ndarray)
    flattened = []
    for item in l:
        if isinstance(item, Iterable) and not isinstance(item, atomic):
            flattened.extend(flatten(item))
        else:
            flattened.append(item)
    return flattened
def single_or(l):
    """Return True iff only one item is different than ``None``, False otherwise.
    Note that this is not a XOR function, according to the truth table of the XOR
    boolean function with n > 2 inputs. Hence the name ``single_or``."""
    # Both `any` calls advance the same iterator: the first stops at the
    # first truthy item, the second reports whether another one exists.
    it = iter(l)
    found_one = any(it)
    found_another = any(it)
    return found_one and not found_another
def filter_ordered(elements, key=None):
    """
    Filter elements in a list while preserving order.

    Parameters
    ----------
    key : callable, optional
        Conversion key used during equality comparison.

    Returns
    -------
    list
        The elements whose key has not been seen before, in first-occurrence
        order.
    """
    if isinstance(elements, types.GeneratorType):
        elements = list(elements)
    # NOTE: the previous np.unique fast path (guarded by a bare `except:`)
    # silently coerced mixed-type input to strings (e.g. [1, 'a'] -> ['1', 'a'])
    # and flattened nested sequences; a plain seen-set scan is always correct.
    if key is None:
        key = lambda i: i
    seen = set()
    ret = []
    for e in elements:
        k = key(e)
        if k not in seen:
            ret.append(e)
            seen.add(k)
    return ret
def filter_sorted(elements, key=None):
    """
    Filter elements in a list and sort them by key. The default key is
    ``operator.attrgetter('name')``.
    """
    sort_key = attrgetter('name') if key is None else key
    deduplicated = filter_ordered(elements, key=sort_key)
    return sorted(deduplicated, key=sort_key)
def dtype_to_cstr(dtype):
    """Translate numpy.dtype into C string."""
    # Thin wrapper over cgen's mapping; kept for a uniform `dtype_to_*` API.
    return cgen_dtype_to_ctype(dtype)
def dtype_to_ctype(dtype):
    """Translate numpy.dtype into a ctypes type."""
    # Only the four dtypes below are supported; anything else raises KeyError.
    mapping = {
        np.int32: ctypes.c_int,
        np.int64: ctypes.c_int64,
        np.float32: ctypes.c_float,
        np.float64: ctypes.c_double,
    }
    return mapping[dtype]
def dtype_to_mpitype(dtype):
    """Map numpy types to MPI datatypes."""
    # Unsupported dtypes raise KeyError, mirroring dtype_to_ctype.
    mpi_names = {
        np.int32: 'MPI_INT',
        np.int64: 'MPI_LONG',
        np.float32: 'MPI_FLOAT',
        np.float64: 'MPI_DOUBLE',
    }
    return mpi_names[dtype]
def ctypes_to_cstr(ctype, toarray=None):
    """Translate ctypes types into C strings."""
    name = ctype.__name__
    # The order of these checks matters: e.g. every Structure is also an
    # object whose name might start with 'c_'.
    if issubclass(ctype, ctypes.Structure):
        return 'struct %s' % name
    if issubclass(ctype, ctypes.Union):
        return 'union %s' % name
    if issubclass(ctype, ctypes._Pointer):
        if toarray:
            # Pointer-to-array needs the parenthesised C declarator form
            return ctypes_to_cstr(ctype._type_, '(* %s)' % toarray)
        return '%s *' % ctypes_to_cstr(ctype._type_)
    if issubclass(ctype, ctypes.Array):
        return '%s[%d]' % (ctypes_to_cstr(ctype._type_, toarray), ctype._length_)
    if name.startswith('c_'):
        # A primitive datatype
        # FIXME: Is there a better way of extracting the C typename ?
        # Here, we're following the ctypes convention that each basic type has
        # either the format c_X_p or c_X, where X is the C typename, for
        # instance `int` or `float`.
        if name.endswith('_p'):
            return '%s *' % name[2:-2]
        if toarray:
            return '%s %s' % (name[2:], toarray)
        return name[2:]
    # A custom datatype (e.g., a typedef-ed pointer to struct)
    return name
def ctypes_pointer(name):
    """Create a ctypes type representing a C pointer to a custom data type ``name``."""
    # Subclassing c_void_p yields an opaque pointer with a descriptive name
    typename = "c_%s_p" % name
    return type(typename, (ctypes.c_void_p,), {})
def pprint(node, verbose=True):
    """
    Shortcut to pretty print Iteration/Expression trees.
    """
    # Imported lazily to avoid a circular import at module load time
    from devito.ir.iet import printAST
    rendered = printAST(node, verbose)
    print(rendered)
def sweep(parameters, keys=None):
    """
    Generator to create a parameter sweep from a dictionary of values
    or value lists.
    """
    keys = keys or parameters.keys()
    # Wrap scalars (and strings, which are iterable) into singleton lists
    # so that `product` can sweep over every axis uniformly.
    axes = []
    for k in keys:
        v = parameters[k]
        if isinstance(v, str) or not isinstance(v, Iterable):
            axes.append([v])
        else:
            axes.append(v)
    for combination in product(*axes):
        yield dict(zip(keys, combination))
| [
"cgen.dtype_to_ctype",
"itertools.groupby",
"devito.ir.iet.printAST",
"itertools.zip_longest",
"numpy.argsort",
"itertools.combinations",
"operator.attrgetter",
"itertools.product",
"functools.reduce",
"collections.OrderedDict",
"numpy.unique"
] | [((721, 751), 'functools.reduce', 'reduce', (['mul', 'iterable', 'initial'], {}), '(mul, iterable, initial)\n', (727, 751), False, 'from functools import reduce\n'), ((2700, 2717), 'itertools.groupby', 'groupby', (['iterable'], {}), '(iterable)\n', (2707, 2717), False, 'from itertools import chain, combinations, groupby, product, zip_longest\n'), ((3553, 3566), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3564, 3566), False, 'from collections import OrderedDict\n'), ((5439, 5465), 'cgen.dtype_to_ctype', 'cgen_dtype_to_ctype', (['dtype'], {}), '(dtype)\n', (5458, 5465), True, 'from cgen import dtype_to_ctype as cgen_dtype_to_ctype\n'), ((7978, 8000), 'itertools.product', 'product', (['*sweep_values'], {}), '(*sweep_values)\n', (7985, 8000), False, 'from itertools import chain, combinations, groupby, product, zip_longest\n'), ((5272, 5290), 'operator.attrgetter', 'attrgetter', (['"""name"""'], {}), "('name')\n", (5282, 5290), False, 'from operator import attrgetter, mul\n'), ((7518, 7541), 'devito.ir.iet.printAST', 'printAST', (['node', 'verbose'], {}), '(node, verbose)\n', (7526, 7541), False, 'from devito.ir.iet import printAST\n'), ((2582, 2600), 'itertools.zip_longest', 'zip_longest', (['*args'], {}), '(*args)\n', (2593, 2600), False, 'from itertools import chain, combinations, groupby, product, zip_longest\n'), ((3417, 3435), 'itertools.combinations', 'combinations', (['s', 'r'], {}), '(s, r)\n', (3429, 3435), False, 'from itertools import chain, combinations, groupby, product, zip_longest\n'), ((4711, 4749), 'numpy.unique', 'np.unique', (['elements'], {'return_index': '(True)'}), '(elements, return_index=True)\n', (4720, 4749), True, 'import numpy as np\n'), ((4779, 4795), 'numpy.argsort', 'np.argsort', (['inds'], {}), '(inds)\n', (4789, 4795), True, 'import numpy as np\n')] |
from tensorflow._api.v2 import image
from metrics.results import get_image_results
from yolo.yolo_mask import YoloMask
import numpy as np
import os
def main():
    """Evaluate the YOLO mask detector over the test image folder.

    For every image, finds the ground-truth annotation `.txt` sharing its
    basename, scores the predictions, and prints the accumulated true
    positive, false positive and false negative counts.
    """
    yolo = YoloMask()
    total_tp = 0
    total_fp = 0
    total_fn = 0
    images_path = './../../darknet/data/mask-dataset/images/test/'
    images = os.listdir(images_path)
    for filename in images:
        # os.path.splitext is robust to filenames containing extra dots
        # (the old `filename.split('.')` raised ValueError for those) and
        # avoids shadowing the imported `image` name.
        name, ext = os.path.splitext(filename)
        if ext == '.txt':
            # Annotation file, not an image
            continue
        image_path = images_path + filename
        txt = images_path + name + '.txt'
        results = predict_and_get_results(yolo, image_path, txt)
        total_tp += results['true_positive']
        total_fp += results['false_positive']
        total_fn += results['false_negative']
    print(total_tp)
    print(total_fp)
    print(total_fn)
def predict_and_get_results(yolo, image, txt):
    """Run the detector on ``image`` and score it against the YOLO-format
    ground truth stored in ``txt`` (one `class cx cy w h` line per object,
    all coordinates normalised)."""
    bounding_boxes = yolo.get_bounding_boxes(image)
    ground_truths = []
    with open(txt, "r") as file:
        for raw in file.readlines():
            fields = raw.split(' ')
            # Drop the leading class id; strip the trailing newline
            fields = fields[1:]
            fields[3] = fields[3].split('\n')[0]
            cx = float(fields[0])
            cy = float(fields[1])
            w = float(fields[2])
            h = float(fields[3])
            # Convert centre/size to corner coordinates (x1, y1, x2, y2)
            ground_truths.append([
                round(cx - (w / 2), 6),
                cy - (h / 2),
                cx + (w / 2),
                cy + (h / 2),
            ])
    boxes, scores, classes, box_count = bounding_boxes
    # Keep only the first `box_count` valid detections of the first image
    predicted_objs = boxes[0][0:box_count[0]]
    ground_truths = np.asarray(ground_truths, dtype=np.float32)
    return get_image_results(predicted_objs, ground_truths, 0.5)
if __name__ == '__main__':
# predict_and_get_results()
main() | [
"numpy.asarray",
"yolo.yolo_mask.YoloMask",
"metrics.results.get_image_results",
"tensorflow._api.v2.image.split",
"os.listdir"
] | [((172, 182), 'yolo.yolo_mask.YoloMask', 'YoloMask', ([], {}), '()\n', (180, 182), False, 'from yolo.yolo_mask import YoloMask\n'), ((315, 338), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (325, 338), False, 'import os\n'), ((1657, 1700), 'numpy.asarray', 'np.asarray', (['ground_truths'], {'dtype': 'np.float32'}), '(ground_truths, dtype=np.float32)\n', (1667, 1700), True, 'import numpy as np\n'), ((1713, 1766), 'metrics.results.get_image_results', 'get_image_results', (['predicted_objs', 'ground_truths', '(0.5)'], {}), '(predicted_objs, ground_truths, 0.5)\n', (1730, 1766), False, 'from metrics.results import get_image_results\n'), ((391, 407), 'tensorflow._api.v2.image.split', 'image.split', (['"""."""'], {}), "('.')\n", (402, 407), False, 'from tensorflow._api.v2 import image\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: CAM-resnet.py
import cv2
import sys
import argparse
import numpy as np
import os
import multiprocessing
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils import optimizer, gradproc
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_num_gpu
from tensorpack.utils import viz
from imagenet_utils import (
fbresnet_augmentor, ImageNetModel)
from resnet_model import (
preresnet_basicblock, preresnet_group)
TOTAL_BATCH_SIZE = 256
DEPTH = None
class Model(ImageNetModel):
    """Preact-ResNet variant used for Class Activation Mapping (CAM).

    The final residual group ('group3new') keeps stride 1 so the last
    convolutional feature maps stay at 14x14, which viz_cam later reshapes
    into the CAM heatmap.
    """
    def get_logits(self, image):
        # DEPTH -> (#blocks per group, block function)
        cfg = {
            18: ([2, 2, 2, 2], preresnet_basicblock),
            34: ([3, 4, 6, 3], preresnet_basicblock),
        }
        defs, block_func = cfg[DEPTH]
        with argscope(Conv2D, use_bias=False,
                      kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
                argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format='channels_first'):
            # NOTE: 'group3new' uses stride 1 (unlike the usual stride-2 last
            # group) to preserve spatial resolution for CAM.
            convmaps = (LinearWrap(image)
                        .Conv2D('conv0', 64, 7, strides=2, activation=BNReLU)
                        .MaxPooling('pool0', 3, strides=2, padding='SAME')
                        .apply2(preresnet_group, 'group0', block_func, 64, defs[0], 1)
                        .apply2(preresnet_group, 'group1', block_func, 128, defs[1], 2)
                        .apply2(preresnet_group, 'group2', block_func, 256, defs[2], 2)
                        .apply2(preresnet_group, 'group3new', block_func, 512, defs[3], 1)())
            print(convmaps)
            convmaps = GlobalAvgPooling('gap', convmaps)
            logits = FullyConnected('linearnew', convmaps, 1000)
        return logits
    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
        opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
        # Slow down the reused early layers by 10x; the renamed layers
        # ('group3new', 'linearnew') train at the full learning rate.
        gradprocs = [gradproc.ScaleGradient(
            [('conv0.*', 0.1), ('group[0-2].*', 0.1)])]
        return optimizer.apply_grad_processors(opt, gradprocs)
def get_data(train_or_test):
    """Build the ILSVRC12 dataflow for ``'train'`` or ``'val'``.

    NOTE(review): relies on the module-level ``args`` and ``BATCH_SIZE``
    assigned in the ``__main__`` block.
    """
    # completely copied from imagenet-resnet.py example
    isTrain = train_or_test == 'train'
    datadir = args.data
    ds = dataset.ILSVRC12(datadir, train_or_test, shuffle=isTrain)
    augmentors = fbresnet_augmentor(isTrain)
    augmentors.append(imgaug.ToUint8())
    ds = AugmentImageComponent(ds, augmentors, copy=False)
    if isTrain:
        # run the (expensive) augmentation in parallel worker processes
        ds = PrefetchDataZMQ(ds, min(25, multiprocessing.cpu_count()))
    ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
    return ds
def get_config():
    """Assemble the tensorpack TrainConfig: dataflows, callbacks, schedule."""
    dataset_train = get_data('train')
    dataset_val = get_data('val')
    return TrainConfig(
        model=Model(),
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            # validate top-1/top-5 error every 2 epochs
            PeriodicTrigger(InferenceRunner(dataset_val, [
                ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]),
                every_k_epochs=2),
            # step-wise learning-rate decay
            ScheduledHyperParamSetter('learning_rate',
                                      [(30, 1e-2), (55, 1e-3), (75, 1e-4), (95, 1e-5)]),
        ],
        steps_per_epoch=5000,
        max_epoch=105,
    )
def viz_cam(model_file, data_dir):
    """Render Class Activation Maps for correctly-classified val images.

    Writes up to 500 ``cam<N>-<class>.jpg`` files, each a side-by-side of
    the original image, the CAM heatmap and their blend.

    NOTE(review): ``data_dir`` is unused here; get_data reads ``args.data``.
    """
    ds = get_data('val')
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_file),
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'group3new/bnlast/Relu', 'linearnew/W'],
        return_input=True
    )
    meta = dataset.ILSVRCMeta().get_synset_words_1000()
    pred = SimpleDatasetPredictor(pred_config, ds)
    cnt = 0
    for inp, outp in pred.get_result():
        images, labels = inp
        wrongs, convmaps, W = outp
        batch = wrongs.shape[0]
        for i in range(batch):
            if wrongs[i]:
                # only visualize correctly classified samples
                continue
            weight = W[:, [labels[i]]].T  # 512x1
            convmap = convmaps[i, :, :, :]  # 512xhxw
            # CAM = class weights dotted with the conv feature maps (14x14)
            mergedmap = np.matmul(weight, convmap.reshape((512, -1))).reshape(14, 14)
            mergedmap = cv2.resize(mergedmap, (224, 224))
            heatmap = viz.intensity_to_rgb(mergedmap, normalize=True)
            blend = images[i] * 0.5 + heatmap * 0.5
            concat = np.concatenate((images[i], heatmap, blend), axis=1)
            classname = meta[labels[i]].split(',')[0]
            cv2.imwrite('cam{}-{}.jpg'.format(cnt, classname), concat)
            cnt += 1
            if cnt == 500:
                return
# CLI entry point: train by default, or visualize CAMs with --cam
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--data', help='ILSVRC dataset dir')
    parser.add_argument('--depth', type=int, default=18)
    parser.add_argument('--load', help='load model')
    parser.add_argument('--cam', action='store_true', help='run visualization')
    args = parser.parse_args()
    DEPTH = args.depth
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    num_gpu = get_num_gpu()
    BATCH_SIZE = TOTAL_BATCH_SIZE // num_gpu
    if args.cam:
        BATCH_SIZE = 128    # something that can run on one gpu
        viz_cam(args.load, args.data)
        sys.exit()
    logger.auto_set_dir()
    config = get_config()
    if args.load:
        config.session_init = get_model_loader(args.load)
    launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(num_gpu))
| [
"cv2.resize",
"tensorpack.tfutils.optimizer.apply_grad_processors",
"tensorpack.utils.gpu.get_num_gpu",
"argparse.ArgumentParser",
"numpy.concatenate",
"imagenet_utils.fbresnet_augmentor",
"tensorpack.dataflow.dataset.ILSVRCMeta",
"tensorpack.dataflow.dataset.ILSVRC12",
"multiprocessing.cpu_count",
... | [((2366, 2423), 'tensorpack.dataflow.dataset.ILSVRC12', 'dataset.ILSVRC12', (['datadir', 'train_or_test'], {'shuffle': 'isTrain'}), '(datadir, train_or_test, shuffle=isTrain)\n', (2382, 2423), False, 'from tensorpack.dataflow import dataset\n'), ((2441, 2468), 'imagenet_utils.fbresnet_augmentor', 'fbresnet_augmentor', (['isTrain'], {}), '(isTrain)\n', (2459, 2468), False, 'from imagenet_utils import fbresnet_augmentor, ImageNetModel\n'), ((4734, 4759), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4757, 4759), False, 'import argparse\n'), ((5232, 5245), 'tensorpack.utils.gpu.get_num_gpu', 'get_num_gpu', ([], {}), '()\n', (5243, 5245), False, 'from tensorpack.utils.gpu import get_num_gpu\n'), ((1906, 1972), 'tensorflow.get_variable', 'tf.get_variable', (['"""learning_rate"""'], {'initializer': '(0.1)', 'trainable': '(False)'}), "('learning_rate', initializer=0.1, trainable=False)\n", (1921, 1972), True, 'import tensorflow as tf\n'), ((1987, 2041), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lr', '(0.9)'], {'use_nesterov': '(True)'}), '(lr, 0.9, use_nesterov=True)\n', (2013, 2041), True, 'import tensorflow as tf\n'), ((2158, 2205), 'tensorpack.tfutils.optimizer.apply_grad_processors', 'optimizer.apply_grad_processors', (['opt', 'gradprocs'], {}), '(opt, gradprocs)\n', (2189, 2205), False, 'from tensorpack.tfutils import optimizer, gradproc\n'), ((5419, 5429), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5427, 5429), False, 'import sys\n'), ((2063, 2128), 'tensorpack.tfutils.gradproc.ScaleGradient', 'gradproc.ScaleGradient', (["[('conv0.*', 0.1), ('group[0-2].*', 0.1)]"], {}), "([('conv0.*', 0.1), ('group[0-2].*', 0.1)])\n", (2085, 2128), False, 'from tensorpack.tfutils import optimizer, gradproc\n'), ((3723, 3743), 'tensorpack.dataflow.dataset.ILSVRCMeta', 'dataset.ILSVRCMeta', ([], {}), '()\n', (3741, 3743), False, 'from tensorpack.dataflow import dataset\n'), ((4266, 4299), 'cv2.resize', 'cv2.resize', 
(['mergedmap', '(224, 224)'], {}), '(mergedmap, (224, 224))\n', (4276, 4299), False, 'import cv2\n'), ((4322, 4369), 'tensorpack.utils.viz.intensity_to_rgb', 'viz.intensity_to_rgb', (['mergedmap'], {'normalize': '(True)'}), '(mergedmap, normalize=True)\n', (4342, 4369), False, 'from tensorpack.utils import viz\n'), ((4443, 4494), 'numpy.concatenate', 'np.concatenate', (['(images[i], heatmap, blend)'], {'axis': '(1)'}), '((images[i], heatmap, blend), axis=1)\n', (4457, 4494), True, 'import numpy as np\n'), ((2626, 2653), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2651, 2653), False, 'import multiprocessing\n'), ((973, 1031), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {'scale': '(2.0)', 'mode': '"""fan_out"""'}), "(scale=2.0, mode='fan_out')\n", (1004, 1031), True, 'import tensorflow as tf\n')] |
import numpy as np
def number_steps_in_queue_simulation(queue_length):
    """Simulate the number of steps needed to reach the front of a queue.

    Each step, the element currently at the front is reinserted at a
    uniformly random position in ``[0, queue_length)``; our position
    advances by one only when that position is at or behind ours.

    Parameters
    ----------
    queue_length : int
        Initial length of the queue; must be at least 1.

    Returns
    -------
    int
        Number of steps until position 0 is reached (0 if already there).

    Raises
    ------
    ValueError
        If ``queue_length`` is smaller than 1 (previously this produced an
        obscure ``np.random.randint(0, 0)`` error or an invalid loop).
    """
    if queue_length < 1:
        raise ValueError("queue_length must be at least 1")
    curr_position = queue_length - 1
    steps = 0
    while curr_position != 0:
        new_position_for_first_element = np.random.randint(0, queue_length)
        if new_position_for_first_element >= curr_position:
            curr_position -= 1
        steps += 1
    return steps
def first_n_harmonic_numbers_generator(n):
"""Method for computing the first n harmonic numbers.
"""
sum = 0
yield sum # The first harmonic number H_0 is 0
for k in range(1, n):
sum += 1/k
yield sum | [
"numpy.random.randint"
] | [((194, 228), 'numpy.random.randint', 'np.random.randint', (['(0)', 'queue_length'], {}), '(0, queue_length)\n', (211, 228), True, 'import numpy as np\n')] |
"""Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
from warnings import warn
import numpy as np
from scipy.lib.six import xrange
from .base import isspmatrix
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
    """
    Compressed Sparse Column matrix

    This can be instantiated in several ways:
        csc_matrix(D)
            with a dense matrix or rank-2 ndarray D
        csc_matrix(S)
            with another sparse matrix S (equivalent to S.tocsc())
        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.
        csc_matrix((data, ij), [shape=(M, N)])
            where ``data`` and ``ij`` satisfy the relationship
            ``a[ij[0, k], ij[1, k]] = data[k]``
        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indptr[i+1]]``
            and their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
            not supplied, the matrix dimensions are inferred from
            the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        Data array of the matrix
    indices
        CSC format index array
    indptr
        CSC format index pointer array
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSC format
      - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
      - efficient column slicing
      - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
      - slow row slicing operations (consider CSR)
      - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------
    >>> from scipy.sparse import *
    >>> from scipy import *
    >>> csc_matrix((3, 4), dtype=int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)
    >>> row = array([0, 2, 2, 0, 1, 2])
    >>> col = array([0, 0, 1, 2, 2, 2])
    >>> data = array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])
    >>> indptr = array([0, 2, 3, 6])
    >>> indices = array([0, 2, 2, 0, 1, 2])
    >>> data = array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])
    """
    def transpose(self, copy=False):
        """Return the transpose as a csr_matrix; CSC data transposes to CSR
        with swapped dimensions, so the index arrays can be reused as-is."""
        from .csr import csr_matrix
        M,N = self.shape
        return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)
    def __iter__(self):
        """Iterate over the rows (yielded as 1 x n CSR matrices)."""
        csr = self.tocsr()
        for r in xrange(self.shape[0]):
            yield csr[r,:]
    def tocsc(self, copy=False):
        """Return self (or a copy) — already in CSC format."""
        if copy:
            return self.copy()
        else:
            return self
    def tocsr(self):
        """Convert to CSR format using the C routine ``csc_tocsr``."""
        M,N = self.shape
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, N))
        # Pre-allocate the CSR output arrays filled in by the C routine
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))
        csc_tocsr(M, N,
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)
        from .csr import csr_matrix
        A = csr_matrix((data, indices, indptr), shape=self.shape)
        # csc_tocsr emits sorted indices by construction
        A.has_sorted_indices = True
        return A
    def __getitem__(self, key):
        # Use CSR to implement fancy indexing: index the transpose (a CSR
        # matrix) with swapped row/col, transposing back where needed.
        row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is a int or slice.
        if (isinstance(row, slice) or isinstance(col, slice) or
                isintlike(row) or isintlike(col)):
            return self.T[col, row].T
        # Things that return a sequence of values.
        else:
            return self.T[col, row]
    def nonzero(self):
        # CSC can't use _cs_matrix's .nonzero method because it
        # returns the indices sorted for self transposed.
        # Get row and col indices, from _cs_matrix.tocoo
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indptr.dtype)
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))
        # Sort them to be in C-style (row-major) order
        ind = np.lexsort((col, row))
        row = row[ind]
        col = col[ind]
        return row, col
    nonzero.__doc__ = _cs_matrix.nonzero.__doc__
    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        # we convert to CSR to maintain compatibility with old impl.
        # in spmatrix.getrow()
        return self._get_submatrix(i, slice(None)).tocsr()
    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSC matrix (column vector).
        """
        return self._get_submatrix(slice(None), i)
    # these functions are used by the parent class (_cs_matrix)
    # to remove redudancy between csc_matrix and csr_matrix
    def _swap(self,x):
        """swap the members of x if this is a column-oriented matrix
        """
        return (x[1],x[0])
def isspmatrix_csc(x):
    """Return True iff ``x`` is a ``csc_matrix`` instance."""
    return isinstance(x, csc_matrix)
| [
"scipy.lib.six.xrange",
"numpy.empty",
"numpy.lexsort"
] | [((3465, 3486), 'scipy.lib.six.xrange', 'xrange', (['self.shape[0]'], {}), '(self.shape[0])\n', (3471, 3486), False, 'from scipy.lib.six import xrange\n'), ((3825, 3857), 'numpy.empty', 'np.empty', (['(M + 1)'], {'dtype': 'idx_dtype'}), '(M + 1, dtype=idx_dtype)\n', (3833, 3857), True, 'import numpy as np\n'), ((3876, 3911), 'numpy.empty', 'np.empty', (['self.nnz'], {'dtype': 'idx_dtype'}), '(self.nnz, dtype=idx_dtype)\n', (3884, 3911), True, 'import numpy as np\n'), ((5371, 5393), 'numpy.lexsort', 'np.lexsort', (['(col, row)'], {}), '((col, row))\n', (5381, 5393), True, 'import numpy as np\n')] |
import argparse
import logging
import time
from os import makedirs
from os.path import exists, join
from shutil import rmtree
import matplotlib.pyplot as plt
import numpy as np
from dejavu.tests.dejavu_test import (DejavuTest, autolabeldoubles,
generate_test_files, log_msg, set_seed)
def main(seconds: int, results_folder: str, temp_folder: str, log: bool, silent: bool,
         log_file: str, padding: int, seed: int, src: str):
    """Fingerprint test snippets with Dejavu and plot accuracy statistics.

    Generates snippets of 1..``seconds`` seconds from the audio in ``src``,
    runs the Dejavu recognizer on them, and writes confidence, duration,
    matching-percentage and matching-accuracy plots into ``results_folder``.
    The temporary snippet folder is deleted at the end.
    """
    # set random seed if set by user
    set_seed(seed)
    # ensure results folder exists
    if not exists(results_folder):
        makedirs(results_folder)
    # set logging
    if log:
        logging.basicConfig(filename=log_file, level=logging.DEBUG)
    # set test seconds
    test_seconds = [f'{i}sec' for i in range(1, seconds + 1, 1)]
    # generate testing files
    for i in range(1, seconds + 1, 1):
        generate_test_files(src, temp_folder, i, padding=padding)
    # scan files
    log_msg(f"Running Dejavu fingerprinter on files in {src}...", log=log, silent=silent)
    tm = time.time()
    djv = DejavuTest(temp_folder, test_seconds)
    log_msg(f"finished obtaining results from dejavu in {(time.time() - tm)}", log=log, silent=silent)
    tests = 1  # djv
    n_secs = len(test_seconds)
    # set result variables -> nested [seconds][category][test] counters
    all_match_counter = [[[0 for x in range(tests)] for x in range(3)] for x in range(n_secs)]
    all_matching_times_counter = [[[0 for x in range(tests)] for x in range(2)] for x in range(n_secs)]
    all_query_duration = [[[0 for x in range(tests)] for x in range(djv.n_lines)] for x in range(n_secs)]
    all_match_confidence = [[[0 for x in range(tests)] for x in range(djv.n_lines)] for x in range(n_secs)]
    # group results by seconds
    for line in range(0, djv.n_lines):
        for col in range(0, djv.n_columns):
            # for dejavu
            all_query_duration[col][line][0] = djv.result_query_duration[line][col]
            all_match_confidence[col][line][0] = djv.result_match_confidence[line][col]
            djv_match_result = djv.result_match[line][col]
            # tally yes / no / invalid match outcomes
            if djv_match_result == 'yes':
                all_match_counter[col][0][0] += 1
            elif djv_match_result == 'no':
                all_match_counter[col][1][0] += 1
            else:
                all_match_counter[col][2][0] += 1
            djv_match_acc = djv.result_matching_times[line][col]
            # a matching-time offset of 0 on a 'yes' counts as accurate
            if djv_match_acc == 0 and djv_match_result == 'yes':
                all_matching_times_counter[col][0][0] += 1
            elif djv_match_acc != 0:
                all_matching_times_counter[col][1][0] += 1
    # create plots
    djv.create_plots('Confidence', all_match_confidence, results_folder)
    djv.create_plots('Query duration', all_query_duration, results_folder)
    # bar chart: matching percentage (yes / no / invalid) per snippet length
    for sec in range(0, n_secs):
        ind = np.arange(3)
        width = 0.25  # the width of the bars
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlim([-1 * width, 2.75])
        means_dvj = [round(x[0] * 100 / djv.n_lines, 1) for x in all_match_counter[sec]]
        rects1 = ax.bar(ind, means_dvj, width, color='r')
        # add labels, title and axis ticks
        ax.set_ylabel('Matching Percentage')
        ax.set_title(f'{test_seconds[sec]} Matching Percentage')
        ax.set_xticks(ind + width)
        labels = ['yes', 'no', 'invalid']
        ax.set_xticklabels(labels)
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
        autolabeldoubles(rects1, ax)
        plt.grid()
        fig_name = join(results_folder, f"matching_perc_{test_seconds[sec]}.png")
        fig.savefig(fig_name)
    # bar chart: matching-times accuracy, relative to the 'yes' matches
    for sec in range(0, n_secs):
        ind = np.arange(2)
        width = 0.25  # the width of the bars
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlim([-1 * width, 1.75])
        div = all_match_counter[sec][0][0]
        if div == 0:
            # avoid division by zero when there were no 'yes' matches
            div = 1000000
        means_dvj = [round(x[0] * 100 / div, 1) for x in all_matching_times_counter[sec]]
        rects1 = ax.bar(ind, means_dvj, width, color='r')
        # add labels, title and axis ticks
        ax.set_ylabel('Matching Accuracy')
        ax.set_title(f'{test_seconds[sec]} Matching Times Accuracy')
        ax.set_xticks(ind + width)
        labels = ['yes', 'no']
        ax.set_xticklabels(labels)
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
        autolabeldoubles(rects1, ax)
        plt.grid()
        fig_name = join(results_folder, f"matching_acc_{test_seconds[sec]}.png")
        fig.savefig(fig_name)
    # remove temporary folder
    rmtree(temp_folder)
# CLI entry point: parse options and hand off to main()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=f'Runs a few tests for dejavu to evaluate '
                                         f'its configuration performance. '
                                         f'Usage: %(prog).py [options] TESTING_AUDIOFOLDER'
                            )
    parser.add_argument("-sec", "--seconds", action="store", default=5, type=int,
                        help='Number of seconds starting from zero to test.')
    parser.add_argument("-res", "--results-folder", action="store", default="./dejavu_test_results",
                        help='Sets the path where the results are saved.')
    parser.add_argument("-temp", "--temp-folder", action="store", default="./dejavu_temp_testing_files",
                        help='Sets the path where the temp files are saved.')
    parser.add_argument("-l", "--log", action="store_true", default=False, help='Enables logging.')
    parser.add_argument("-sl", "--silent", action="store_false", default=False, help='Disables printing.')
    parser.add_argument("-lf", "--log-file", default="results-compare.log",
                        help='Set the path and filename of the log file.')
    parser.add_argument("-pad", "--padding", action="store", default=10, type=int,
                        help='Number of seconds to pad choice of place to test from.')
    parser.add_argument("-sd", "--seed", action="store", default=None, type=int, help='Random seed.')
    parser.add_argument("src", type=str, help='Source folder for audios to use as tests.')
    args = parser.parse_args()
    main(args.seconds, args.results_folder, args.temp_folder, args.log, args.silent, args.log_file, args.padding,
         args.seed, args.src)
| [
"dejavu.tests.dejavu_test.DejavuTest",
"argparse.ArgumentParser",
"os.makedirs",
"logging.basicConfig",
"os.path.join",
"os.path.exists",
"time.time",
"dejavu.tests.dejavu_test.set_seed",
"matplotlib.pyplot.figure",
"numpy.arange",
"dejavu.tests.dejavu_test.generate_test_files",
"dejavu.tests.... | [((516, 530), 'dejavu.tests.dejavu_test.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (524, 530), False, 'from dejavu.tests.dejavu_test import DejavuTest, autolabeldoubles, generate_test_files, log_msg, set_seed\n'), ((980, 1069), 'dejavu.tests.dejavu_test.log_msg', 'log_msg', (['f"""Running Dejavu fingerprinter on files in {src}..."""'], {'log': 'log', 'silent': 'silent'}), "(f'Running Dejavu fingerprinter on files in {src}...', log=log,\n silent=silent)\n", (987, 1069), False, 'from dejavu.tests.dejavu_test import DejavuTest, autolabeldoubles, generate_test_files, log_msg, set_seed\n'), ((1076, 1087), 'time.time', 'time.time', ([], {}), '()\n', (1085, 1087), False, 'import time\n'), ((1098, 1135), 'dejavu.tests.dejavu_test.DejavuTest', 'DejavuTest', (['temp_folder', 'test_seconds'], {}), '(temp_folder, test_seconds)\n', (1108, 1135), False, 'from dejavu.tests.dejavu_test import DejavuTest, autolabeldoubles, generate_test_files, log_msg, set_seed\n'), ((4696, 4715), 'shutil.rmtree', 'rmtree', (['temp_folder'], {}), '(temp_folder)\n', (4702, 4715), False, 'from shutil import rmtree\n'), ((4758, 4926), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'f"""Runs a few tests for dejavu to evaluate its configuration performance. Usage: %(prog).py [options] TESTING_AUDIOFOLDER"""'}), "(description=\n f'Runs a few tests for dejavu to evaluate its configuration performance. 
Usage: %(prog).py [options] TESTING_AUDIOFOLDER'\n )\n", (4781, 4926), False, 'import argparse\n'), ((578, 600), 'os.path.exists', 'exists', (['results_folder'], {}), '(results_folder)\n', (584, 600), False, 'from os.path import exists, join\n'), ((610, 634), 'os.makedirs', 'makedirs', (['results_folder'], {}), '(results_folder)\n', (618, 634), False, 'from os import makedirs\n'), ((674, 733), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file', 'level': 'logging.DEBUG'}), '(filename=log_file, level=logging.DEBUG)\n', (693, 733), False, 'import logging\n'), ((900, 957), 'dejavu.tests.dejavu_test.generate_test_files', 'generate_test_files', (['src', 'temp_folder', 'i'], {'padding': 'padding'}), '(src, temp_folder, i, padding=padding)\n', (919, 957), False, 'from dejavu.tests.dejavu_test import DejavuTest, autolabeldoubles, generate_test_files, log_msg, set_seed\n'), ((2878, 2890), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2887, 2890), True, 'import numpy as np\n'), ((2952, 2964), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2962, 2964), True, 'import matplotlib.pyplot as plt\n'), ((3543, 3571), 'dejavu.tests.dejavu_test.autolabeldoubles', 'autolabeldoubles', (['rects1', 'ax'], {}), '(rects1, ax)\n', (3559, 3571), False, 'from dejavu.tests.dejavu_test import DejavuTest, autolabeldoubles, generate_test_files, log_msg, set_seed\n'), ((3580, 3590), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3588, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3673), 'os.path.join', 'join', (['results_folder', 'f"""matching_perc_{test_seconds[sec]}.png"""'], {}), "(results_folder, f'matching_perc_{test_seconds[sec]}.png')\n", (3615, 3673), False, 'from os.path import exists, join\n'), ((3752, 3764), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (3761, 3764), True, 'import numpy as np\n'), ((3826, 3838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3836, 3838), True, 'import 
matplotlib.pyplot as plt\n'), ((4500, 4528), 'dejavu.tests.dejavu_test.autolabeldoubles', 'autolabeldoubles', (['rects1', 'ax'], {}), '(rects1, ax)\n', (4516, 4528), False, 'from dejavu.tests.dejavu_test import DejavuTest, autolabeldoubles, generate_test_files, log_msg, set_seed\n'), ((4538, 4548), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4546, 4548), True, 'import matplotlib.pyplot as plt\n'), ((4569, 4630), 'os.path.join', 'join', (['results_folder', 'f"""matching_acc_{test_seconds[sec]}.png"""'], {}), "(results_folder, f'matching_acc_{test_seconds[sec]}.png')\n", (4573, 4630), False, 'from os.path import exists, join\n'), ((1194, 1205), 'time.time', 'time.time', ([], {}), '()\n', (1203, 1205), False, 'import time\n')] |
import batoid
import numpy as np
from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff
@timer
def test_properties():
    """Check equality, commutativity, pickling, and .surfaces of batoid.Sum."""
    np.random.seed(5)
    for _ in range(100):
        sphere = batoid.Sphere(np.random.uniform(1, 3))
        parab = batoid.Paraboloid(np.random.uniform(1, 3))
        two_sum = batoid.Sum([sphere, parab])
        do_pickle(two_sum)
        # Sum construction is commutative
        assert two_sum == batoid.Sum([parab, sphere])
        # Order of .surfaces is not guaranteed, only membership
        assert sphere in two_sum.surfaces
        assert parab in two_sum.surfaces
        quad = batoid.Quadric(np.random.uniform(3, 5), np.random.uniform(-0.1, 0.1))
        three_sum = batoid.Sum([sphere, parab, quad])
        do_pickle(three_sum)
        # Commutative for every other ordering of the three components
        for trio in ([parab, quad, sphere],
                     [quad, sphere, parab],
                     [quad, parab, sphere],
                     [parab, sphere, quad],
                     [sphere, quad, parab]):
            assert three_sum == batoid.Sum(trio)
        assert sphere in three_sum.surfaces
        assert parab in three_sum.surfaces
        assert quad in three_sum.surfaces
    do_pickle(two_sum)
@timer
def test_sag():
    """The sag of a Sum equals the sum of its components' sags."""
    np.random.seed(57)
    for _ in range(100):
        sphere = batoid.Sphere(np.random.uniform(1, 3))
        parab = batoid.Paraboloid(np.random.uniform(1, 3))
        pair = batoid.Sum([sphere, parab])
        x = np.random.normal(size=5000)
        y = np.random.normal(size=5000)
        np.testing.assert_allclose(
            pair.sag(x, y),
            sphere.sag(x, y) + parab.sag(x, y),
            rtol=1e-12,
            atol=1e-12
        )
        # Same property with a third component added
        quad = batoid.Quadric(np.random.uniform(3, 5), np.random.uniform(-0.1, 0.1))
        trio = batoid.Sum([sphere, parab, quad])
        np.testing.assert_allclose(
            trio.sag(x, y),
            sphere.sag(x, y) + parab.sag(x, y) + quad.sag(x, y),
            rtol=1e-12,
            atol=1e-12
        )
@timer
def test_add_plane():
    """Summing with a Plane must not change sag or normal vectors."""
    np.random.seed(577)
    for _ in range(100):
        sphere = batoid.Sphere(np.random.uniform(1, 3))
        combo = batoid.Sum([sphere, batoid.Plane()])
        x = np.random.normal(size=5000)
        y = np.random.normal(size=5000)
        np.testing.assert_allclose(
            combo.sag(x, y),
            sphere.sag(x, y),
            rtol=1e-12,
            atol=1e-12
        )
        # Spot-check normals at the first 100 sample points
        for px, py in zip(x[:100], y[:100]):
            np.testing.assert_allclose(
                combo.normal(px, py),
                sphere.normal(px, py),
                rtol=1e-12,
                atol=1e-12
            )
@timer
def test_sum_paraboloid():
    # A paraboloid sag goes as r^2/(2*R), so a Sum of two paraboloids with
    # radii R1 and R2 has sag r^2*(1/(2*R1) + 1/(2*R2)), i.e. it matches a
    # single paraboloid with R = 0.5/(1/(2*R1) + 1/(2*R2)).
    np.random.seed(5772)
    for _ in range(100):
        R1 = np.random.uniform(1, 2)
        R2 = np.random.uniform(2, 3)
        Rsum = 0.5/(1/(2*R1) + 1/(2*R2))
        summed = batoid.Sum([batoid.Paraboloid(R1), batoid.Paraboloid(R2)])
        single = batoid.Paraboloid(Rsum)
        x = np.random.normal(size=5000)
        y = np.random.normal(size=5000)
        np.testing.assert_allclose(
            single.sag(x, y),
            summed.sag(x, y),
            rtol=1e-12,
            atol=1e-12
        )
        # Normals must agree too; spot-check the first 100 points
        for px, py in zip(x[:100], y[:100]):
            np.testing.assert_allclose(
                single.normal(px, py),
                summed.normal(px, py),
                rtol=1e-12,
                atol=1e-12
            )
@timer
def test_intersect():
    """intersect, list-wise intersect, and intersectInPlace agree on a Sum."""
    np.random.seed(57721)
    rv0 = batoid.RayVector([
        batoid.Ray(
            np.random.normal(scale=0.1),
            np.random.normal(scale=0.1),
            10,
            np.random.normal(scale=1e-4),
            np.random.normal(scale=1e-4),
            -1
        )
        for _ in range(100)
    ])
    for _ in range(100):
        surf = batoid.Sum([
            batoid.Sphere(np.random.uniform(3, 10)),
            batoid.Paraboloid(np.random.uniform(3, 10))
        ])
        rv = batoid.RayVector(rv0)
        rv1 = surf.intersect(rv)
        rv2 = batoid.RayVector([surf.intersect(r) for r in rv])
        rv3 = batoid.RayVector(rv)
        surf.intersectInPlace(rv)
        for single_ray in rv3:
            surf.intersectInPlace(single_ray)
        for result in (rv1, rv2, rv3):
            assert rays_allclose(result, rv)
@timer
def test_ne():
    """Distinct Sum compositions (and a bare Plane) must all compare unequal."""
    candidates = [
        batoid.Sum([batoid.Plane(), batoid.Plane()]),
        batoid.Sum([batoid.Plane(), batoid.Sphere(1.0)]),
        batoid.Sum([batoid.Plane(), batoid.Plane(), batoid.Plane()]),
        batoid.Plane(),
    ]
    all_obj_diff(candidates)
@timer
def test_fail():
    """A ray starting below the surface and heading -z must fail to intersect."""
    surf = batoid.Sum([batoid.Plane(), batoid.Sphere(1.0)])
    # Functional API
    miss = surf.intersect(batoid.Ray([0, 0, surf.sag(0, 0) - 1], [0, 0, -1]))
    assert miss.failed
    # In-place API with the same geometry
    ray = batoid.Ray([0, 0, surf.sag(0, 0) - 1], [0, 0, -1])
    surf.intersectInPlace(ray)
    assert ray.failed
if __name__ == '__main__':
    # Run the full Sum test suite when executed directly as a script.
    test_properties()
    test_sag()
    test_add_plane()
    test_sum_paraboloid()
    test_intersect()
    test_ne()
    test_fail()
| [
"batoid.Sphere",
"numpy.random.uniform",
"numpy.random.seed",
"test_helpers.rays_allclose",
"batoid.RayVector",
"test_helpers.do_pickle",
"batoid.Plane",
"numpy.random.normal",
"batoid.Sum",
"test_helpers.all_obj_diff",
"batoid.Paraboloid"
] | [((140, 157), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (154, 157), True, 'import numpy as np\n'), ((1118, 1136), 'numpy.random.seed', 'np.random.seed', (['(57)'], {}), '(57)\n', (1132, 1136), True, 'import numpy as np\n'), ((1885, 1904), 'numpy.random.seed', 'np.random.seed', (['(577)'], {}), '(577)\n', (1899, 1904), True, 'import numpy as np\n'), ((2792, 2812), 'numpy.random.seed', 'np.random.seed', (['(5772)'], {}), '(5772)\n', (2806, 2812), True, 'import numpy as np\n'), ((3627, 3648), 'numpy.random.seed', 'np.random.seed', (['(57721)'], {}), '(57721)\n', (3641, 3648), True, 'import numpy as np\n'), ((4735, 4753), 'test_helpers.all_obj_diff', 'all_obj_diff', (['objs'], {}), '(objs)\n', (4747, 4753), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((305, 325), 'batoid.Sum', 'batoid.Sum', (['[s1, s2]'], {}), '([s1, s2])\n', (315, 325), False, 'import batoid\n'), ((334, 348), 'test_helpers.do_pickle', 'do_pickle', (['sum'], {}), '(sum)\n', (343, 348), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((640, 664), 'batoid.Sum', 'batoid.Sum', (['[s1, s2, s3]'], {}), '([s1, s2, s3])\n', (650, 664), False, 'import batoid\n'), ((673, 688), 'test_helpers.do_pickle', 'do_pickle', (['sum2'], {}), '(sum2)\n', (682, 688), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((1074, 1088), 'test_helpers.do_pickle', 'do_pickle', (['sum'], {}), '(sum)\n', (1083, 1088), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((1284, 1304), 'batoid.Sum', 'batoid.Sum', (['[s1, s2]'], {}), '([s1, s2])\n', (1294, 1304), False, 'import batoid\n'), ((1318, 1345), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5000)'}), '(size=5000)\n', (1334, 1345), True, 'import numpy as np\n'), ((1358, 1385), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5000)'}), '(size=5000)\n', (1374, 1385), True, 'import numpy as 
np\n'), ((1647, 1671), 'batoid.Sum', 'batoid.Sum', (['[s1, s2, s3]'], {}), '([s1, s2, s3])\n', (1657, 1671), False, 'import batoid\n'), ((2068, 2082), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (2080, 2082), False, 'import batoid\n'), ((2097, 2117), 'batoid.Sum', 'batoid.Sum', (['[s1, s2]'], {}), '([s1, s2])\n', (2107, 2117), False, 'import batoid\n'), ((2131, 2158), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5000)'}), '(size=5000)\n', (2147, 2158), True, 'import numpy as np\n'), ((2171, 2198), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5000)'}), '(size=5000)\n', (2187, 2198), True, 'import numpy as np\n'), ((2851, 2874), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (2868, 2874), True, 'import numpy as np\n'), ((2888, 2911), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(3)'], {}), '(2, 3)\n', (2905, 2911), True, 'import numpy as np\n'), ((2970, 2991), 'batoid.Paraboloid', 'batoid.Paraboloid', (['R1'], {}), '(R1)\n', (2987, 2991), False, 'import batoid\n'), ((3008, 3029), 'batoid.Paraboloid', 'batoid.Paraboloid', (['R2'], {}), '(R2)\n', (3025, 3029), False, 'import batoid\n'), ((3048, 3071), 'batoid.Paraboloid', 'batoid.Paraboloid', (['Rsum'], {}), '(Rsum)\n', (3065, 3071), False, 'import batoid\n'), ((3091, 3117), 'batoid.Sum', 'batoid.Sum', (['[para1, para2]'], {}), '([para1, para2])\n', (3101, 3117), False, 'import batoid\n'), ((3131, 3158), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5000)'}), '(size=5000)\n', (3147, 3158), True, 'import numpy as np\n'), ((3171, 3198), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5000)'}), '(size=5000)\n', (3187, 3198), True, 'import numpy as np\n'), ((4089, 4109), 'batoid.Sum', 'batoid.Sum', (['[s1, s2]'], {}), '([s1, s2])\n', (4099, 4109), False, 'import batoid\n'), ((4124, 4145), 'batoid.RayVector', 'batoid.RayVector', (['rv0'], {}), '(rv0)\n', (4140, 4145), False, 'import batoid\n'), ((4255, 4275), 
'batoid.RayVector', 'batoid.RayVector', (['rv'], {}), '(rv)\n', (4271, 4275), False, 'import batoid\n'), ((4384, 4406), 'test_helpers.rays_allclose', 'rays_allclose', (['rv1', 'rv'], {}), '(rv1, rv)\n', (4397, 4406), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((4422, 4444), 'test_helpers.rays_allclose', 'rays_allclose', (['rv2', 'rv'], {}), '(rv2, rv)\n', (4435, 4444), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((4460, 4482), 'test_helpers.rays_allclose', 'rays_allclose', (['rv3', 'rv'], {}), '(rv3, rv)\n', (4473, 4482), False, 'from test_helpers import timer, do_pickle, rays_allclose, all_obj_diff\n'), ((4710, 4724), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4722, 4724), False, 'import batoid\n'), ((210, 233), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (227, 233), True, 'import numpy as np\n'), ((266, 289), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (283, 289), True, 'import numpy as np\n'), ((401, 421), 'batoid.Sum', 'batoid.Sum', (['[s2, s1]'], {}), '([s2, s1])\n', (411, 421), False, 'import batoid\n'), ((570, 593), 'numpy.random.uniform', 'np.random.uniform', (['(3)', '(5)'], {}), '(3, 5)\n', (587, 593), True, 'import numpy as np\n'), ((595, 623), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (612, 623), True, 'import numpy as np\n'), ((742, 766), 'batoid.Sum', 'batoid.Sum', (['[s2, s3, s1]'], {}), '([s2, s3, s1])\n', (752, 766), False, 'import batoid\n'), ((790, 814), 'batoid.Sum', 'batoid.Sum', (['[s3, s1, s2]'], {}), '([s3, s1, s2])\n', (800, 814), False, 'import batoid\n'), ((838, 862), 'batoid.Sum', 'batoid.Sum', (['[s3, s2, s1]'], {}), '([s3, s2, s1])\n', (848, 862), False, 'import batoid\n'), ((886, 910), 'batoid.Sum', 'batoid.Sum', (['[s2, s1, s3]'], {}), '([s2, s1, s3])\n', (896, 910), False, 'import batoid\n'), ((934, 958), 'batoid.Sum', 
'batoid.Sum', (['[s1, s3, s2]'], {}), '([s1, s3, s2])\n', (944, 958), False, 'import batoid\n'), ((1189, 1212), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (1206, 1212), True, 'import numpy as np\n'), ((1245, 1268), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (1262, 1268), True, 'import numpy as np\n'), ((1577, 1600), 'numpy.random.uniform', 'np.random.uniform', (['(3)', '(5)'], {}), '(3, 5)\n', (1594, 1600), True, 'import numpy as np\n'), ((1602, 1630), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (1619, 1630), True, 'import numpy as np\n'), ((2030, 2053), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (2047, 2053), True, 'import numpy as np\n'), ((3992, 4016), 'numpy.random.uniform', 'np.random.uniform', (['(3)', '(10)'], {}), '(3, 10)\n', (4009, 4016), True, 'import numpy as np\n'), ((4049, 4073), 'numpy.random.uniform', 'np.random.uniform', (['(3)', '(10)'], {}), '(3, 10)\n', (4066, 4073), True, 'import numpy as np\n'), ((4802, 4816), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4814, 4816), False, 'import batoid\n'), ((4818, 4836), 'batoid.Sphere', 'batoid.Sphere', (['(1.0)'], {}), '(1.0)\n', (4831, 4836), False, 'import batoid\n'), ((3710, 3737), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)'}), '(scale=0.1)\n', (3726, 3737), True, 'import numpy as np\n'), ((3751, 3778), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)'}), '(scale=0.1)\n', (3767, 3778), True, 'import numpy as np\n'), ((3808, 3838), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.0001)'}), '(scale=0.0001)\n', (3824, 3838), True, 'import numpy as np\n'), ((3850, 3880), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.0001)'}), '(scale=0.0001)\n', (3866, 3880), True, 'import numpy as np\n'), ((4540, 4554), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4552, 4554), False, 'import 
batoid\n'), ((4556, 4570), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4568, 4570), False, 'import batoid\n'), ((4594, 4608), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4606, 4608), False, 'import batoid\n'), ((4610, 4628), 'batoid.Sphere', 'batoid.Sphere', (['(1.0)'], {}), '(1.0)\n', (4623, 4628), False, 'import batoid\n'), ((4652, 4666), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4664, 4666), False, 'import batoid\n'), ((4668, 4682), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4680, 4682), False, 'import batoid\n'), ((4684, 4698), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (4696, 4698), False, 'import batoid\n')] |
#!/usr/bin/env python
import respirnet
import numpy as np
import networkx as nx
import sys
import argparse
'''
This class outputs a file (.gml) that contains a network for the pre-bot and botzinger complex based on the desired network parameters. This class allows you to vary the architecture based on the amount of intra population inhibition. Recommended range is gamma between 0 and 1. Zero will translate to a model similar to the half center oscillator and one will be uniform inhibition across the network
INPUTS:
n0 - The number of nodes that are in population 1 (pre-bot)
n1 - The number of nodes that are in population 2 (bot)
intra - The amount of intra population inhibition
inter - The amount of inter population inhibition
#####OPTIONAL Inputs####
pI - The probability of an inhibitory neuron being created
gE - The conductance for excitatory synapses in nS
gI - The conductance for inhibitory synapses in nS
Return:
output - file name for the .gml file the graph will be saved to
'''
def main(argv=None):
    """Parse CLI options, build the pre-Bot/Bot block-model graph, save as GML.

    Args:
        argv: argument list including the program name; defaults to sys.argv.
    """
    if argv is None:
        argv = sys.argv
    parser = argparse.ArgumentParser(
        prog="genPreBotBot_gamma",
        description='Generates Graph based on Block Model with varying amounts of intra inhibition')
    parser.add_argument('n0', type=int, help='number of nodes in pop1')
    parser.add_argument('n1', type=int, help='number of nodes in pop2')
    parser.add_argument('intraDegree', type=float, help='the average degree for a population to itself')
    parser.add_argument('interDegree', type=float, help='the average degree for a population to the other')
    parser.add_argument('output', help='output filename')
    parser.add_argument('-pI', type=float, default=.2,
                        help='probability of inhibitory neuron')
    parser.add_argument('-gE', type=float, default=2.5,
                        help='conductance of excitatory (E) synapses, nS')
    parser.add_argument('-gI', type=float, default=2.5,
                        help='conductance of inhibitory (I) synapses, nS')
    opts = parser.parse_args(argv[1:])

    pop0 = opts.n0
    pop1 = opts.n1
    # Entry (r, c) is the probability that a node in population r connects to
    # a node in population c.
    # Excitatory connection probability matrix
    excitatory_p = np.array([(3.0 / (pop0 - 1), 0.00 / pop1),
                             (0.00 / pop0, 3.0 / (pop1 - 1))])
    # Inhibitory connection probability matrix
    inhibitory_p = np.array([(opts.intraDegree / (pop0 - 1), opts.interDegree / pop1),
                             (opts.interDegree / pop0, opts.intraDegree / (pop1 - 1))])
    # Probability distribution over neuron types (?, Bursting, Tonic, Quiescent)
    type_probs = [0, 0.25, 0.45, 0.3]

    graph = respirnet.er_prebot_bot(pop0, pop1, inhibitory_p, excitatory_p,
                                    type_probs, opts.pI, opts.gE, opts.gI)
    nx.write_gml(graph, opts.output)
if __name__ == '__main__':
    # Script entry point: main() has no return statement, so status is None
    # and sys.exit(None) yields exit code 0.
    status = main()
    sys.exit(status)
| [
"respirnet.er_prebot_bot",
"argparse.ArgumentParser",
"numpy.array",
"networkx.write_gml",
"sys.exit"
] | [((1150, 1303), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""genPreBotBot_gamma"""', 'description': '"""Generates Graph based on Block Model with varying amounts of intra inhibition"""'}), "(prog='genPreBotBot_gamma', description=\n 'Generates Graph based on Block Model with varying amounts of intra inhibition'\n )\n", (1173, 1303), False, 'import argparse\n'), ((2652, 2718), 'numpy.array', 'np.array', (['[(3.0 / (n0 - 1), 0.0 / n1), (0.0 / n0, 3.0 / (n1 - 1))]'], {}), '([(3.0 / (n0 - 1), 0.0 / n1), (0.0 / n0, 3.0 / (n1 - 1))])\n', (2660, 2718), True, 'import numpy as np\n'), ((2839, 2942), 'numpy.array', 'np.array', (['[(intraDegree / (n0 - 1), interDegree / n1), (interDegree / n0, intraDegree /\n (n1 - 1))]'], {}), '([(intraDegree / (n0 - 1), interDegree / n1), (interDegree / n0, \n intraDegree / (n1 - 1))])\n', (2847, 2942), True, 'import numpy as np\n'), ((3104, 3169), 'respirnet.er_prebot_bot', 'respirnet.er_prebot_bot', (['n0', 'n1', 'pMatI', 'pMatE', 'pTypes', 'pI', 'gE', 'gI'], {}), '(n0, n1, pMatI, pMatE, pTypes, pI, gE, gI)\n', (3127, 3169), False, 'import respirnet\n'), ((3172, 3195), 'networkx.write_gml', 'nx.write_gml', (['g', 'output'], {}), '(g, output)\n', (3184, 3195), True, 'import networkx as nx\n'), ((3248, 3264), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (3256, 3264), False, 'import sys\n')] |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a simple algorithm for extracting nearest neighbor
exchange parameters by mapping low energy magnetic orderings to a Heisenberg
model.
"""
import copy
import logging
import sys
from ast import literal_eval
import numpy as np
import pandas as pd
from monty.json import MSONable, jsanitize
from monty.serialization import dumpfn
from pymatgen import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "June 2019"
class HeisenbergMapper:
"""
Class to compute exchange parameters from low energy magnetic orderings.
"""
    def __init__(self, ordered_structures, energies, cutoff=0.0, tol=0.02):
        """
        Exchange parameters are computed by mapping to a classical Heisenberg
        model. Strategy is the scheme for generating neighbors. Currently only
        MinimumDistanceNN is implemented.
        n+1 unique orderings are required to compute n exchange
        parameters.
        First run a MagneticOrderingsWF to obtain low energy collinear magnetic
        orderings and find the magnetic ground state. Then enumerate magnetic
        states with the ground state as the input structure, find the subset
        of supercells that map to the ground state, and do static calculations
        for these orderings.
        Args:
            ordered_structures (list): Structure objects with magmoms.
            energies (list): Total energies of each relaxed magnetic structure.
            cutoff (float): Cutoff in Angstrom for nearest neighbor search.
                Defaults to 0 (only NN, no NNN, etc.)
            tol (float): Tolerance (in Angstrom) on nearest neighbor distances
                being equal.
        Parameters:
            strategy (object): Class from pymatgen.analysis.local_env for
                constructing graphs.
            sgraphs (list): StructureGraph objects.
            unique_site_ids (dict): Maps each site to its unique numerical
                identifier.
            wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
                position.
            nn_interactions (dict): {i: j} pairs of NN interactions
                between unique sites.
            dists (dict): NN, NNN, and NNNN interaction distances
            ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
                graph.
            ex_params (dict): Exchange parameter values (meV/atom)
        """
        # Save original copies of inputs (pre-screening versions)
        self.ordered_structures_ = ordered_structures
        self.energies_ = energies
        # Sanitize inputs and optionally order them by energy / magnetic moments
        hs = HeisenbergScreener(ordered_structures, energies, screen=False)
        ordered_structures = hs.screened_structures
        energies = hs.screened_energies
        self.ordered_structures = ordered_structures
        self.energies = energies
        self.cutoff = cutoff
        self.tol = tol
        # Get graph representations
        self.sgraphs = self._get_graphs(cutoff, ordered_structures)
        # Get unique site ids and wyckoff symbols
        self.unique_site_ids, self.wyckoff_ids = self._get_unique_sites(
            ordered_structures[0]
        )
        # These attributes are set by internal methods
        self.nn_interactions = None
        self.dists = None
        self.ex_mat = None
        self.ex_params = None
        # Check how many commensurate graphs we found
        if len(self.sgraphs) < 2:
            # NOTE(review): this aborts the entire process; raising ValueError
            # would be friendlier to library callers (behavior kept as-is).
            print("We need at least 2 unique orderings.")
            sys.exit(1)
        else:  # Set attributes
            self._get_nn_dict()
            self._get_exchange_df()
@staticmethod
def _get_graphs(cutoff, ordered_structures):
"""
Generate graph representations of magnetic structures with nearest
neighbor bonds. Right now this only works for MinimumDistanceNN.
Args:
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
ordered_structures (list): Structure objects.
Returns:
sgraphs (list): StructureGraph objects.
"""
# Strategy for finding neighbors
if cutoff:
strategy = MinimumDistanceNN(cutoff=cutoff, get_all_sites=True)
else:
strategy = MinimumDistanceNN() # only NN
# Generate structure graphs
sgraphs = [
StructureGraph.with_local_env_strategy(s, strategy=strategy)
for s in ordered_structures
]
return sgraphs
@staticmethod
def _get_unique_sites(structure):
"""
Get dict that maps site indices to unique identifiers.
Args:
structure (Structure): ground state Structure object.
Returns:
unique_site_ids (dict): maps tuples of equivalent site indices to a
unique int identifier
wyckoff_ids (dict): maps tuples of equivalent site indices to their
wyckoff symbols
"""
# Get a nonmagnetic representation of the supercell geometry
s0 = CollinearMagneticStructureAnalyzer(
structure, make_primitive=False, threshold=0.0
).get_nonmagnetic_structure(make_primitive=False)
# Get unique sites and wyckoff positions
if "wyckoff" in s0.site_properties:
s0.remove_site_property("wyckoff")
symm_s0 = SpacegroupAnalyzer(s0).get_symmetrized_structure()
wyckoff = ["n/a"] * len(symm_s0)
equivalent_indices = symm_s0.equivalent_indices
wyckoff_symbols = symm_s0.wyckoff_symbols
# Construct dictionaries that map sites to numerical and wyckoff
# identifiers
unique_site_ids = {}
wyckoff_ids = {}
i = 0
for indices, symbol in zip(equivalent_indices, wyckoff_symbols):
unique_site_ids[tuple(indices)] = i
wyckoff_ids[i] = symbol
i += 1
for index in indices:
wyckoff[index] = symbol
return unique_site_ids, wyckoff_ids
    def _get_nn_dict(self):
        """Get dict of unique nearest neighbor interactions.

        Collects i<->j bond distances from the first structure graph, merges
        distances that agree within ``self.tol``, keeps the three shortest
        shells (NN, NNN, NNNN), and records which unique site interacts with
        which at each shell.

        Returns:
            None: (sets self.nn_interactions and self.dists instance variables)
        """
        tol = self.tol  # tolerance on NN distances
        sgraph = self.sgraphs[0]
        unique_site_ids = self.unique_site_ids
        nn_dict = {}
        nnn_dict = {}
        nnnn_dict = {}
        all_dists = []
        # Loop over unique sites and get neighbor distances up to NNNN
        for k in unique_site_ids:
            i = k[0]  # representative site index for this equivalence group
            # NOTE(review): i_key is computed here but unused in this loop
            i_key = unique_site_ids[k]
            connected_sites = sgraph.get_connected_sites(i)
            dists = [round(cs[-1], 2) for cs in connected_sites]  # i<->j distances
            dists = sorted(list(set(dists)))  # NN, NNN, NNNN, etc.
            dists = dists[:3]  # keep up to NNNN
            all_dists += dists
        # Keep only up to NNNN and call dists equal if they are within tol
        all_dists = sorted(list(set(all_dists)))
        rm_list = []
        for idx, d in enumerate(all_dists[:-1]):
            if abs(d - all_dists[idx + 1]) < tol:
                rm_list.append(idx + 1)
        all_dists = [d for idx, d in enumerate(all_dists) if idx not in rm_list]
        if len(all_dists) < 3:  # pad with zeros
            all_dists += [0.0] * (3 - len(all_dists))
        all_dists = all_dists[:3]
        labels = ["nn", "nnn", "nnnn"]
        dists = {l: d for (l, d) in zip(labels, all_dists)}
        # Get dictionary keys for interactions
        for k in unique_site_ids:
            i = k[0]
            i_key = unique_site_ids[k]
            connected_sites = sgraph.get_connected_sites(i)
            # Loop over sites and determine unique NN, NNN, etc. interactions
            # NOTE(review): each assignment below overwrites any previous j_key
            # for the same i_key, so only the last matching partner per shell
            # is kept — presumably intentional for single-partner lattices.
            for cs in connected_sites:
                dist = round(cs[-1], 2)  # i_j distance
                j = cs[2]  # j index
                for key in unique_site_ids.keys():
                    if j in key:
                        j_key = unique_site_ids[key]
                        if abs(dist - dists["nn"]) <= tol:
                            nn_dict[i_key] = j_key
                        elif abs(dist - dists["nnn"]) <= tol:
                            nnn_dict[i_key] = j_key
                        elif abs(dist - dists["nnnn"]) <= tol:
                            nnnn_dict[i_key] = j_key
        nn_interactions = {"nn": nn_dict, "nnn": nnn_dict, "nnnn": nnnn_dict}
        self.dists = dists
        self.nn_interactions = nn_interactions
    def _get_exchange_df(self):
        """
        Loop over all sites in a graph and count the number and types of
        nearest neighbor interactions, computing +-|S_i . S_j| to construct
        a Heisenberg Hamiltonian for each graph.
        Returns:
            None: (sets self.ex_mat instance variable)
        TODO:
            * Deal with large variance in |S| across configs
        """
        sgraphs = self.sgraphs
        tol = self.tol
        unique_site_ids = self.unique_site_ids
        nn_interactions = self.nn_interactions
        dists = self.dists
        # Get |site magmoms| from FM ordering so that S_i and S_j are consistent?
        # Large S variations is throwing a loop
        # fm_struct = self.get_low_energy_orderings()[0]
        # Total energy and nonmagnetic energy contribution
        columns = ["E", "E0"]
        # Get labels of unique NN interactions, e.g. "0-1-nn"
        for k0, v0 in nn_interactions.items():
            for i, j in v0.items():  # i and j indices
                c = str(i) + "-" + str(j) + "-" + str(k0)
                c_rev = str(j) + "-" + str(i) + "-" + str(k0)
                if c not in columns and c_rev not in columns:
                    columns.append(c)
        num_sgraphs = len(sgraphs)
        # Keep n interactions (not counting 'E') for n+1 structure graphs
        columns = columns[: num_sgraphs + 1]
        num_nn_j = len(columns) - 1  # ignore total energy
        j_columns = [name for name in columns if name not in ["E", "E0"]]
        ex_mat_empty = pd.DataFrame(columns=columns)
        ex_mat = ex_mat_empty.copy()
        if len(j_columns) < 2:
            self.ex_mat = ex_mat  # Only <J> can be calculated here
        else:
            sgraphs_copy = copy.deepcopy(sgraphs)
            sgraph_index = 0
            # Loop over all sites in each graph and compute |S_i . S_j|
            # for n+1 unique graphs to compute n exchange params
            for graph in sgraphs:
                sgraph = sgraphs_copy.pop(0)
                ex_row = pd.DataFrame(
                    np.zeros((1, num_nn_j + 1)), index=[sgraph_index], columns=columns
                )
                for i, node in enumerate(sgraph.graph.nodes):
                    # s_i_sign = np.sign(sgraph.structure.site_properties['magmom'][i])
                    s_i = sgraph.structure.site_properties["magmom"][i]
                    for k in unique_site_ids.keys():
                        if i in k:
                            i_index = unique_site_ids[k]
                    # Get all connections for ith site and compute |S_i . S_j|
                    connections = sgraph.get_connected_sites(i)
                    # dists = [round(cs[-1], 2) for cs in connections]  # i<->j distances
                    # dists = sorted(list(set(dists)))  # NN, NNN, NNNN, etc.
                    for j, connection in enumerate(connections):
                        j_site = connection[2]
                        dist = round(connection[-1], 2)  # i_j distance
                        # s_j_sign = np.sign(sgraph.structure.site_properties['magmom'][j_site])
                        s_j = sgraph.structure.site_properties["magmom"][j_site]
                        for k in unique_site_ids.keys():
                            if j_site in k:
                                j_index = unique_site_ids[k]
                        # Determine order of connection
                        # NOTE(review): if no shell matches within tol, `order`
                        # stays unbound and the next line raises NameError
                        if abs(dist - dists["nn"]) <= tol:
                            order = "-nn"
                        elif abs(dist - dists["nnn"]) <= tol:
                            order = "-nnn"
                        elif abs(dist - dists["nnnn"]) <= tol:
                            order = "-nnnn"
                        j_ij = str(i_index) + "-" + str(j_index) + order
                        j_ji = str(j_index) + "-" + str(i_index) + order
                        if j_ij in ex_mat.columns:
                            ex_row.at[sgraph_index, j_ij] -= s_i * s_j
                        elif j_ji in ex_mat.columns:
                            ex_row.at[sgraph_index, j_ji] -= s_i * s_j
                # Ignore the row if it is a duplicate to avoid singular matrix
                # NOTE(review): DataFrame.append was removed in pandas 2.0, so
                # this method requires an older pandas — verify pinned version.
                if ex_mat.append(ex_row)[j_columns].equals(
                    ex_mat.append(ex_row)[j_columns].drop_duplicates(keep="first")
                ):
                    e_index = self.ordered_structures.index(sgraph.structure)
                    ex_row.at[sgraph_index, "E"] = self.energies[e_index]
                    sgraph_index += 1
                    ex_mat = ex_mat.append(ex_row)
                    # if sgraph_index == num_nn_j:  # check for zero columns
                    #     zeros = [b for b in (ex_mat[j_columns] == 0).all(axis=0)]
                    #     if True in zeros:
                    #         sgraph_index -= 1  # keep looking
            ex_mat[j_columns] = ex_mat[j_columns].div(
                2.0
            )  # 1/2 factor in Heisenberg Hamiltonian
            ex_mat[["E0"]] = 1  # Nonmagnetic contribution
            # Check for singularities and delete columns with all zeros
            zeros = [b for b in (ex_mat == 0).all(axis=0)]
            if True in zeros:
                c = ex_mat.columns[zeros.index(True)]
                ex_mat = ex_mat.drop(columns=[c], axis=1)
            # ex_mat = ex_mat.drop(ex_mat.tail(len_zeros).index)
            # Force ex_mat to be square
            ex_mat = ex_mat[: ex_mat.shape[1] - 1]
            self.ex_mat = ex_mat
def get_exchange(self):
"""
Take Heisenberg Hamiltonian and corresponding energy for each row and
solve for the exchange parameters.
Returns:
ex_params (dict): Exchange parameter values (meV/atom).
"""
ex_mat = self.ex_mat
# Solve the matrix equation for J_ij values
E = ex_mat[["E"]]
j_names = [j for j in ex_mat.columns if j not in ["E"]]
# Only 1 NN interaction
if len(j_names) < 3:
# Estimate exchange by J ~ E_AFM - E_FM
j_avg = self.estimate_exchange()
ex_params = {"<J>": j_avg}
self.ex_params = ex_params
return ex_params
# Solve eigenvalue problem for more than 1 NN interaction
H = ex_mat.loc[:, ex_mat.columns != "E"].values
H_inv = np.linalg.inv(H)
j_ij = np.dot(H_inv, E)
# Convert J_ij to meV
j_ij[1:] *= 1000 # J_ij in meV
j_ij = j_ij.tolist()
ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij)}
self.ex_params = ex_params
return ex_params
    def get_low_energy_orderings(self):
        """
        Find lowest energy FM and AFM orderings to compute E_AFM - E_FM.

        First tries exact Ordering.FM / Ordering.AFM matches; if either is
        missing, falls back to a brute-force search using the largest /
        smallest net moment as a proxy.

        Returns:
            fm_struct (Structure): fm structure with 'magmom' site property
            afm_struct (Structure): afm structure with 'magmom' site property
            fm_e (float): fm energy
            afm_e (float): afm energy
        """
        fm_struct, afm_struct = None, None
        mag_min = np.inf
        mag_max = 0.001
        fm_e_min = 0
        afm_e_min = 0
        # epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]
        for s, e in zip(self.ordered_structures, self.energies):
            ordering = CollinearMagneticStructureAnalyzer(
                s, threshold=0.0, make_primitive=False
            ).ordering
            magmoms = s.site_properties["magmom"]
            # Try to find matching orderings first
            if ordering == Ordering.FM and e < fm_e_min:
                fm_struct = s
                mag_max = abs(sum(magmoms))
                fm_e = e
                fm_e_min = e
            if ordering == Ordering.AFM and e < afm_e_min:
                afm_struct = s
                afm_e = e
                mag_min = abs(sum(magmoms))
                afm_e_min = e
        # Brute force search for closest thing to FM and AFM
        # NOTE(review): if neither branch below fires, fm_e / afm_e can be
        # referenced before assignment at the return — verify inputs always
        # contain at least one high-moment and one low-moment ordering.
        if not fm_struct or not afm_struct:
            for s, e in zip(self.ordered_structures, self.energies):
                magmoms = s.site_properties["magmom"]
                if abs(sum(magmoms)) > mag_max:  # FM ground state
                    fm_struct = s
                    fm_e = e
                    mag_max = abs(sum(magmoms))
                # AFM ground state
                if abs(sum(magmoms)) < mag_min:
                    afm_struct = s
                    afm_e = e
                    mag_min = abs(sum(magmoms))
                    afm_e_min = e
                elif abs(sum(magmoms)) == 0 and mag_min == 0:
                    if e < afm_e_min:
                        afm_struct = s
                        afm_e = e
                        afm_e_min = e
        # Convert to magnetic structures with 'magmom' site property
        fm_struct = CollinearMagneticStructureAnalyzer(
            fm_struct, make_primitive=False, threshold=0.0
        ).get_structure_with_only_magnetic_atoms(make_primitive=False)
        afm_struct = CollinearMagneticStructureAnalyzer(
            afm_struct, make_primitive=False, threshold=0.0
        ).get_structure_with_only_magnetic_atoms(make_primitive=False)
        return fm_struct, afm_struct, fm_e, afm_e
def estimate_exchange(self, fm_struct=None, afm_struct=None, fm_e=None, afm_e=None):
"""
Estimate <J> for a structure based on low energy FM and AFM orderings.
Args:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy/atom
afm_e (float): afm energy/atom
Returns:
j_avg (float): Average exchange parameter (meV/atom)
"""
# Get low energy orderings if not supplied
if any(arg is None for arg in [fm_struct, afm_struct, fm_e, afm_e]):
fm_struct, afm_struct, fm_e, afm_e = self.get_low_energy_orderings()
magmoms = fm_struct.site_properties["magmom"]
# Normalize energies by number of magnetic ions
# fm_e = fm_e / len(magmoms)
# afm_e = afm_e / len(afm_magmoms)
m_avg = np.mean([np.sqrt(m ** 2) for m in magmoms])
# If m_avg for FM config is < 1 we won't get sensibile results.
if m_avg < 1:
iamthedanger = """
Local magnetic moments are small (< 1 muB / atom). The
exchange parameters may be wrong, but <J> and the mean
field critical temperature estimate may be OK.
"""
logging.warning(iamthedanger)
delta_e = afm_e - fm_e # J > 0 -> FM
j_avg = delta_e / (m_avg ** 2) # eV / magnetic ion
j_avg *= 1000 # meV / ion
return j_avg
def get_mft_temperature(self, j_avg):
"""
Crude mean field estimate of critical temperature based on <J> for
one sublattice, or solving the coupled equations for a multisublattice
material.
Args:
j_avg (float): j_avg (float): Average exchange parameter (meV/atom)
Returns:
mft_t (float): Critical temperature (K)
"""
num_sublattices = len(self.unique_site_ids)
k_boltzmann = 0.0861733 # meV/K
# Only 1 magnetic sublattice
if num_sublattices == 1:
mft_t = 2 * abs(j_avg) / 3 / k_boltzmann
else: # multiple magnetic sublattices
omega = np.zeros((num_sublattices, num_sublattices))
ex_params = self.ex_params
ex_params = {k: v for (k, v) in ex_params.items() if k != "E0"} # ignore E0
for k in ex_params:
# split into i, j unique site identifiers
sites = [elem for elem in k.split("-")]
sites = [int(num) for num in sites[:2]] # cut 'nn' identifier
i, j = sites[0], sites[1]
omega[i, j] += ex_params[k]
omega[j, i] += ex_params[k]
omega = omega * 2 / 3 / k_boltzmann
eigenvals, eigenvecs = np.linalg.eig(omega)
mft_t = max(eigenvals)
if mft_t > 1500: # Not sensible!
stayoutofmyterritory = """
This mean field estimate is too high! Probably
the true low energy orderings were not given as inputs.
"""
logging.warning(stayoutofmyterritory)
return mft_t
def get_interaction_graph(self, filename=None):
"""
Get a StructureGraph with edges and weights that correspond to exchange
interactions and J_ij values, respectively.
Args:
filename (str): if not None, save interaction graph to filename.
Returns:
igraph (StructureGraph): Exchange interaction graph.
"""
structure = self.ordered_structures[0]
sgraph = self.sgraphs[0]
igraph = StructureGraph.with_empty_graph(
structure, edge_weight_name="exchange_constant", edge_weight_units="meV"
)
if "<J>" in self.ex_params: # Only <J> is available
warning_msg = """
Only <J> is available. The interaction graph will not tell
you much.
"""
logging.warning(warning_msg)
# J_ij exchange interaction matrix
for i, node in enumerate(sgraph.graph.nodes):
connections = sgraph.get_connected_sites(i)
for c in connections:
jimage = c[1] # relative integer coordinates of atom j
j = c[2] # index of neighbor
dist = c[-1] # i <-> j distance
j_exc = self._get_j_exc(i, j, dist)
igraph.add_edge(
i, j, to_jimage=jimage, weight=j_exc, warn_duplicates=False
)
# Save to a json file if desired
if filename:
if filename.endswith(".json"):
dumpfn(igraph, filename)
else:
filename += ".json"
dumpfn(igraph, filename)
return igraph
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites
(10E-2 precision)
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k in self.unique_site_ids.keys():
if i in k:
i_index = self.unique_site_ids[k]
if j in k:
j_index = self.unique_site_ids[k]
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
    def get_heisenberg_model(self):
        """Save results of mapping to a HeisenbergModel object.
        Returns:
            hmodel (HeisenbergModel): MSONable object.
        """
        # Original formula unit with nonmagnetic ions
        hm_formula = str(self.ordered_structures_[0].composition.reduced_formula)
        # Snapshot the mapper state so the model is self-contained
        hm_structures = self.ordered_structures
        hm_energies = self.energies
        hm_cutoff = self.cutoff
        hm_tol = self.tol
        hm_sgraphs = self.sgraphs
        hm_usi = self.unique_site_ids
        hm_wids = self.wyckoff_ids
        hm_nni = self.nn_interactions
        hm_d = self.dists
        # Exchange matrix DataFrame in json format
        hm_em = self.ex_mat.to_json()
        # NOTE(review): call order looks significant — get_interaction_graph()
        # reads self.ex_params, which get_exchange() presumably populates;
        # confirm before reordering these calls.
        hm_ep = self.get_exchange()
        hm_javg = self.estimate_exchange()
        hm_igraph = self.get_interaction_graph()
        # Positional arguments must match HeisenbergModel.__init__'s order
        hmodel = HeisenbergModel(
            hm_formula,
            hm_structures,
            hm_energies,
            hm_cutoff,
            hm_tol,
            hm_sgraphs,
            hm_usi,
            hm_wids,
            hm_nni,
            hm_d,
            hm_em,
            hm_ep,
            hm_javg,
            hm_igraph,
        )
        return hmodel
class HeisenbergScreener:
    """
    Class to clean and screen magnetic orderings.
    """

    def __init__(self, structures, energies, screen=False):
        """
        This class pre-processes magnetic orderings and energies for
        HeisenbergMapper. It prioritizes low-energy orderings with large and
        localized magnetic moments.

        Args:
            structures (list): Structure objects with magnetic moments.
            energies (list): Energies/atom of magnetic orderings.
            screen (bool): Try to screen out high energy and low-spin configurations.

        Attributes:
            screened_structures (list): Sorted structures.
            screened_energies (list): Sorted energies.
        """
        # Cleanup
        structures, energies = self._do_cleanup(structures, energies)

        n_structures = len(structures)

        # If there are more than 2 structures, we want to perform a
        # screening to prioritize well-behaved orderings
        if screen and n_structures > 2:
            structures, energies = self._do_screen(structures, energies)

        self.screened_structures = structures
        self.screened_energies = energies

    @staticmethod
    def _do_cleanup(structures, energies):
        """Sanitize input structures and energies.

        Takes magnetic structures and performs the following operations
        - Erases nonmagnetic ions and gives all ions ['magmom'] site prop
        - Converts total energies -> energy / magnetic ion
        - Checks for duplicate/degenerate orderings
        - Sorts by energy

        Args:
            structures (list): Structure objects with magmoms.
            energies (list): Corresponding energies.

        Returns:
            ordered_structures (list): Sanitized structures.
            ordered_energies (list): Sorted energies.
        """
        # Get only magnetic ions & give all structures site_properties['magmom']
        # zero threshold so that magnetic ions with small moments
        # are preserved
        ordered_structures = [
            CollinearMagneticStructureAnalyzer(
                s, make_primitive=False, threshold=0.0
            ).get_structure_with_only_magnetic_atoms(make_primitive=False)
            for s in structures
        ]

        # Convert to energies / magnetic ion
        energies = [e / len(s) for (e, s) in zip(energies, ordered_structures)]

        # Check for duplicate / degenerate states (sometimes different initial
        # configs relax to the same state). Keep the first occurrence.
        e_tol = 6  # 10^-6 eV/atom tol on energies
        remove_list = []
        for i, e in enumerate(energies):
            e = round(e, e_tol)
            if i not in remove_list:
                for i_check, e_check in enumerate(energies):
                    e_check = round(e_check, e_tol)
                    if i != i_check and i_check not in remove_list and e == e_check:
                        remove_list.append(i_check)

        # NOTE: structures with small |magmoms| (< 0.1 uB) are deliberately
        # NOT discarded here; screening only reorders them (see _do_screen).

        # Remove duplicates
        if remove_list:
            ordered_structures = [
                s for i, s in enumerate(ordered_structures) if i not in remove_list
            ]
            energies = [e for i, e in enumerate(energies) if i not in remove_list]

        # Sort ascending by energy. Sort on the energy alone: sorting the
        # (energy, structure) pairs directly would try to compare Structure
        # objects whenever two energies tie, raising TypeError.
        ordered_structures = [
            s
            for _, s in sorted(
                zip(energies, ordered_structures), key=lambda pair: pair[0]
            )
        ]
        ordered_energies = sorted(energies)

        return ordered_structures, ordered_energies

    @staticmethod
    def _do_screen(structures, energies):
        """Screen and sort magnetic orderings based on some criteria.

        Prioritize low energy orderings and large, localized magmoms.
        do_clean should be run first to sanitize inputs.

        Args:
            structures (list): At least three structure objects.
            energies (list): Energies.

        Returns:
            screened_structures (list): Sorted structures.
            screened_energies (list): Sorted energies.
        """
        magmoms = [s.site_properties["magmom"] for s in structures]
        n_below_1ub = [len([m for m in ms if abs(m) < 1]) for ms in magmoms]

        df = pd.DataFrame(
            {
                "structure": structures,
                "energy": energies,
                "magmoms": magmoms,
                "n_below_1ub": n_below_1ub,
            }
        )

        # keep the ground and first excited state fixed to capture the
        # low-energy spectrum
        df_high_energy = df.iloc[2:]

        # Prioritize structures with fewer magmoms < 1 uB
        df_high_energy = df_high_energy.sort_values(by="n_below_1ub")

        index = [0, 1] + list(df_high_energy.index)

        # sort
        df = df.reindex(index)
        screened_structures = list(df["structure"].values)
        screened_energies = list(df["energy"].values)

        return screened_structures, screened_energies
class HeisenbergModel(MSONable):
    """
    Store a Heisenberg model fit to low-energy magnetic orderings.
    Intended to be generated by HeisenbergMapper.get_heisenberg_model().
    """
    def __init__(
        self,
        formula=None,
        structures=None,
        energies=None,
        cutoff=None,
        tol=None,
        sgraphs=None,
        unique_site_ids=None,
        wyckoff_ids=None,
        nn_interactions=None,
        dists=None,
        ex_mat=None,
        ex_params=None,
        javg=None,
        igraph=None,
    ):
        """
        Args:
            formula (str): Reduced formula of compound.
            structures (list): Structure objects with magmoms.
            energies (list): Energies of each relaxed magnetic structure.
            cutoff (float): Cutoff in Angstrom for nearest neighbor search.
            tol (float): Tolerance (in Angstrom) on nearest neighbor distances being equal.
            sgraphs (list): StructureGraph objects.
            unique_site_ids (dict): Maps each site to its unique numerical
                identifier.
            wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
                position.
            nn_interacations (dict): {i: j} pairs of NN interactions
                between unique sites.
            dists (dict): NN, NNN, and NNNN interaction distances
            ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
                graph.
            ex_params (dict): Exchange parameter values (meV/atom).
            javg (float): <J> exchange param (meV/atom).
            igraph (StructureGraph): Exchange interaction graph.
        """
        # Plain attribute capture; all processing happens in HeisenbergMapper
        self.formula = formula
        self.structures = structures
        self.energies = energies
        self.cutoff = cutoff
        self.tol = tol
        self.sgraphs = sgraphs
        self.unique_site_ids = unique_site_ids
        self.wyckoff_ids = wyckoff_ids
        self.nn_interactions = nn_interactions
        self.dists = dists
        self.ex_mat = ex_mat
        self.ex_params = ex_params
        self.javg = javg
        self.igraph = igraph
    def as_dict(self):
        """
        Because some dicts have tuple keys, some sanitization is required for json compatibility.
        """
        d = {}
        # Standard MSONable metadata
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["@version"] = __version__
        d["formula"] = self.formula
        # Nested MSONable objects are serialized via their own as_dict()
        d["structures"] = [s.as_dict() for s in self.structures]
        d["energies"] = self.energies
        d["cutoff"] = self.cutoff
        d["tol"] = self.tol
        d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
        d["dists"] = self.dists
        d["ex_params"] = self.ex_params
        d["javg"] = self.javg
        d["igraph"] = self.igraph.as_dict()
        # Sanitize tuple & int keys
        d["ex_mat"] = jsanitize(self.ex_mat)
        d["nn_interactions"] = jsanitize(self.nn_interactions)
        d["unique_site_ids"] = jsanitize(self.unique_site_ids)
        d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)
        return d
    @classmethod
    def from_dict(cls, d):
        """Create a HeisenbergModel from a dict."""
        # Reconstitute the site ids
        # jsanitize stringified the tuple/int keys; literal_eval restores them
        usids = {}
        wids = {}
        nnis = {}
        for k, v in d["nn_interactions"].items():
            nn_dict = {}
            for k1, v1 in v.items():
                key = literal_eval(k1)
                nn_dict[key] = v1
            nnis[k] = nn_dict
        for k, v in d["unique_site_ids"].items():
            key = literal_eval(k)
            # Single-site keys may round-trip as bare ints; normalize to tuples
            if isinstance(key, int):
                usids[tuple([key])] = v
            elif isinstance(key, tuple):
                usids[key] = v
        for k, v in d["wyckoff_ids"].items():
            key = literal_eval(k)
            wids[key] = v
        # Reconstitute the structure and graph objects
        structures = []
        sgraphs = []
        for v in d["structures"]:
            structures.append(Structure.from_dict(v))
        for v in d["sgraphs"]:
            sgraphs.append(StructureGraph.from_dict(v))
        # Interaction graph
        igraph = StructureGraph.from_dict(d["igraph"])
        # Reconstitute the exchange matrix DataFrame
        # HACK: ex_mat was stored via jsanitize; eval() turns the string back
        # into a dict. Unsafe on untrusted input -- only load dicts produced
        # by as_dict() from trusted sources.
        try:
            ex_mat = eval(d["ex_mat"])
            ex_mat = pd.DataFrame.from_dict(ex_mat)
        except SyntaxError:  # if ex_mat is empty
            ex_mat = pd.DataFrame(columns=["E", "E0"])
        hmodel = HeisenbergModel(
            formula=d["formula"],
            structures=structures,
            energies=d["energies"],
            cutoff=d["cutoff"],
            tol=d["tol"],
            sgraphs=sgraphs,
            unique_site_ids=usids,
            wyckoff_ids=wids,
            nn_interactions=nnis,
            dists=d["dists"],
            ex_mat=ex_mat,
            ex_params=d["ex_params"],
            javg=d["javg"],
            igraph=igraph,
        )
        return hmodel
    def _get_j_exc(self, i, j, dist):
        """
        Convenience method for looking up exchange parameter between two sites.

        NOTE: duplicates HeisenbergMapper._get_j_exc so a deserialized model
        can evaluate J without a mapper instance.

        Args:
            i (int): index of ith site
            j (int): index of jth site
            dist (float): distance (Angstrom) between sites +- tol

        Returns:
            j_exc (float): Exchange parameter in meV
        """
        # Get unique site identifiers
        for k in self.unique_site_ids.keys():
            if i in k:
                i_index = self.unique_site_ids[k]
            if j in k:
                j_index = self.unique_site_ids[k]
        order = ""
        # Determine order of interaction
        if abs(dist - self.dists["nn"]) <= self.tol:
            order = "-nn"
        elif abs(dist - self.dists["nnn"]) <= self.tol:
            order = "-nnn"
        elif abs(dist - self.dists["nnnn"]) <= self.tol:
            order = "-nnnn"
        # Parameters may be stored under "i-j" or "j-i"
        j_ij = str(i_index) + "-" + str(j_index) + order
        j_ji = str(j_index) + "-" + str(i_index) + order
        if j_ij in self.ex_params:
            j_exc = self.ex_params[j_ij]
        elif j_ji in self.ex_params:
            j_exc = self.ex_params[j_ji]
        else:
            j_exc = 0
        # Check if only averaged NN <J> values are available
        if "<J>" in self.ex_params and order == "-nn":
            j_exc = self.ex_params["<J>"]
        return j_exc
| [
"pymatgen.analysis.graphs.StructureGraph.from_dict",
"pymatgen.analysis.local_env.MinimumDistanceNN",
"pandas.DataFrame",
"pymatgen.analysis.graphs.StructureGraph.with_local_env_strategy",
"logging.warning",
"numpy.linalg.eig",
"monty.serialization.dumpfn",
"pymatgen.Structure.from_dict",
"copy.deep... | [((10534, 10563), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (10546, 10563), True, 'import pandas as pd\n'), ((15375, 15391), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (15388, 15391), True, 'import numpy as np\n'), ((15407, 15423), 'numpy.dot', 'np.dot', (['H_inv', 'E'], {}), '(H_inv, E)\n', (15413, 15423), True, 'import numpy as np\n'), ((21996, 22106), 'pymatgen.analysis.graphs.StructureGraph.with_empty_graph', 'StructureGraph.with_empty_graph', (['structure'], {'edge_weight_name': '"""exchange_constant"""', 'edge_weight_units': '"""meV"""'}), "(structure, edge_weight_name=\n 'exchange_constant', edge_weight_units='meV')\n", (22027, 22106), False, 'from pymatgen.analysis.graphs import StructureGraph\n'), ((30423, 30534), 'pandas.DataFrame', 'pd.DataFrame', (["{'structure': structures, 'energy': energies, 'magmoms': magmoms,\n 'n_below_1ub': n_below_1ub}"], {}), "({'structure': structures, 'energy': energies, 'magmoms':\n magmoms, 'n_below_1ub': n_below_1ub})\n", (30435, 30534), True, 'import pandas as pd\n'), ((34106, 34128), 'monty.json.jsanitize', 'jsanitize', (['self.ex_mat'], {}), '(self.ex_mat)\n', (34115, 34128), False, 'from monty.json import MSONable, jsanitize\n'), ((34160, 34191), 'monty.json.jsanitize', 'jsanitize', (['self.nn_interactions'], {}), '(self.nn_interactions)\n', (34169, 34191), False, 'from monty.json import MSONable, jsanitize\n'), ((34223, 34254), 'monty.json.jsanitize', 'jsanitize', (['self.unique_site_ids'], {}), '(self.unique_site_ids)\n', (34232, 34254), False, 'from monty.json import MSONable, jsanitize\n'), ((34282, 34309), 'monty.json.jsanitize', 'jsanitize', (['self.wyckoff_ids'], {}), '(self.wyckoff_ids)\n', (34291, 34309), False, 'from monty.json import MSONable, jsanitize\n'), ((35396, 35433), 'pymatgen.analysis.graphs.StructureGraph.from_dict', 'StructureGraph.from_dict', (["d['igraph']"], {}), "(d['igraph'])\n", (35420, 35433), False, 'from 
pymatgen.analysis.graphs import StructureGraph\n'), ((3997, 4008), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4005, 4008), False, 'import sys\n'), ((4653, 4705), 'pymatgen.analysis.local_env.MinimumDistanceNN', 'MinimumDistanceNN', ([], {'cutoff': 'cutoff', 'get_all_sites': '(True)'}), '(cutoff=cutoff, get_all_sites=True)\n', (4670, 4705), False, 'from pymatgen.analysis.local_env import MinimumDistanceNN\n'), ((4743, 4762), 'pymatgen.analysis.local_env.MinimumDistanceNN', 'MinimumDistanceNN', ([], {}), '()\n', (4760, 4762), False, 'from pymatgen.analysis.local_env import MinimumDistanceNN\n'), ((4843, 4903), 'pymatgen.analysis.graphs.StructureGraph.with_local_env_strategy', 'StructureGraph.with_local_env_strategy', (['s'], {'strategy': 'strategy'}), '(s, strategy=strategy)\n', (4881, 4903), False, 'from pymatgen.analysis.graphs import StructureGraph\n'), ((10742, 10764), 'copy.deepcopy', 'copy.deepcopy', (['sgraphs'], {}), '(sgraphs)\n', (10755, 10764), False, 'import copy\n'), ((19651, 19680), 'logging.warning', 'logging.warning', (['iamthedanger'], {}), '(iamthedanger)\n', (19666, 19680), False, 'import logging\n'), ((20537, 20581), 'numpy.zeros', 'np.zeros', (['(num_sublattices, num_sublattices)'], {}), '((num_sublattices, num_sublattices))\n', (20545, 20581), True, 'import numpy as np\n'), ((21149, 21169), 'numpy.linalg.eig', 'np.linalg.eig', (['omega'], {}), '(omega)\n', (21162, 21169), True, 'import numpy as np\n'), ((21454, 21491), 'logging.warning', 'logging.warning', (['stayoutofmyterritory'], {}), '(stayoutofmyterritory)\n', (21469, 21491), False, 'import logging\n'), ((22349, 22377), 'logging.warning', 'logging.warning', (['warning_msg'], {}), '(warning_msg)\n', (22364, 22377), False, 'import logging\n'), ((34802, 34817), 'ast.literal_eval', 'literal_eval', (['k'], {}), '(k)\n', (34814, 34817), False, 'from ast import literal_eval\n'), ((35032, 35047), 'ast.literal_eval', 'literal_eval', (['k'], {}), '(k)\n', (35044, 35047), False, 'from ast import 
literal_eval\n'), ((35561, 35591), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['ex_mat'], {}), '(ex_mat)\n', (35583, 35591), True, 'import pandas as pd\n'), ((5535, 5621), 'pymatgen.analysis.magnetism.CollinearMagneticStructureAnalyzer', 'CollinearMagneticStructureAnalyzer', (['structure'], {'make_primitive': '(False)', 'threshold': '(0.0)'}), '(structure, make_primitive=False,\n threshold=0.0)\n', (5569, 5621), False, 'from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering\n'), ((5848, 5870), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['s0'], {}), '(s0)\n', (5866, 5870), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((16357, 16431), 'pymatgen.analysis.magnetism.CollinearMagneticStructureAnalyzer', 'CollinearMagneticStructureAnalyzer', (['s'], {'threshold': '(0.0)', 'make_primitive': '(False)'}), '(s, threshold=0.0, make_primitive=False)\n', (16391, 16431), False, 'from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering\n'), ((17889, 17975), 'pymatgen.analysis.magnetism.CollinearMagneticStructureAnalyzer', 'CollinearMagneticStructureAnalyzer', (['fm_struct'], {'make_primitive': '(False)', 'threshold': '(0.0)'}), '(fm_struct, make_primitive=False,\n threshold=0.0)\n', (17923, 17975), False, 'from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering\n'), ((18077, 18164), 'pymatgen.analysis.magnetism.CollinearMagneticStructureAnalyzer', 'CollinearMagneticStructureAnalyzer', (['afm_struct'], {'make_primitive': '(False)', 'threshold': '(0.0)'}), '(afm_struct, make_primitive=False,\n threshold=0.0)\n', (18111, 18164), False, 'from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering\n'), ((19253, 19268), 'numpy.sqrt', 'np.sqrt', (['(m ** 2)'], {}), '(m ** 2)\n', (19260, 19268), True, 'import numpy as np\n'), ((23040, 23064), 'monty.serialization.dumpfn', 'dumpfn', (['igraph', 'filename'], {}), 
'(igraph, filename)\n', (23046, 23064), False, 'from monty.serialization import dumpfn\n'), ((23135, 23159), 'monty.serialization.dumpfn', 'dumpfn', (['igraph', 'filename'], {}), '(igraph, filename)\n', (23141, 23159), False, 'from monty.serialization import dumpfn\n'), ((34652, 34668), 'ast.literal_eval', 'literal_eval', (['k1'], {}), '(k1)\n', (34664, 34668), False, 'from ast import literal_eval\n'), ((35239, 35261), 'pymatgen.Structure.from_dict', 'Structure.from_dict', (['v'], {}), '(v)\n', (35258, 35261), False, 'from pymatgen import Structure\n'), ((35321, 35348), 'pymatgen.analysis.graphs.StructureGraph.from_dict', 'StructureGraph.from_dict', (['v'], {}), '(v)\n', (35345, 35348), False, 'from pymatgen.analysis.graphs import StructureGraph\n'), ((35663, 35696), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['E', 'E0']"}), "(columns=['E', 'E0'])\n", (35675, 35696), True, 'import pandas as pd\n'), ((11070, 11097), 'numpy.zeros', 'np.zeros', (['(1, num_nn_j + 1)'], {}), '((1, num_nn_j + 1))\n', (11078, 11097), True, 'import numpy as np\n'), ((27920, 27994), 'pymatgen.analysis.magnetism.CollinearMagneticStructureAnalyzer', 'CollinearMagneticStructureAnalyzer', (['s'], {'make_primitive': '(False)', 'threshold': '(0.0)'}), '(s, make_primitive=False, threshold=0.0)\n', (27954, 27994), False, 'from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering\n')] |
"""
This module is used to call CP2K simulation and parse its output
The user need to supply a complete input script with ENERGY_FORCE or ENERGY runtype, and CELL, COORD blocks. Example scripts can be found in tests/test_files/cp2k_input...
The module will copy the input template to a new file with "_run" suffix,
edit the atomic coordination in the COORD blocks and run the similation with
the parallel set up given.
We note that, if the CP2K executable is only for serial run, using it along with MPI setting can lead to repeating output in the output file, wrong number of forces and error in the other modules.
"""
import os
from subprocess import call
import time
import numpy as np
from flare import output
from flare import struc
from typing import List
name = "CP2K"
def run_dft_par(
    dft_input,
    structure,
    dft_loc,
    ncpus=1,
    dft_out="dft.out",
    npool=None,
    mpi="mpi",
    **dft_kwargs,
):
    """Run a DFT calculation from the given input template and atomic
    configuration; a serial run is executed when ncpus == 1.

    :param dft_input: input template file name
    :param structure: atomic configuration
    :param dft_loc: relative/absolute executable of the DFT code
    :param ncpus: # of CPU for mpi
    :param dft_out: output file name
    :param npool: not used
    :param mpi: "mpi" selects mpirun, anything else selects srun
    :param **dft_kwargs: not used
    :return: forces
    """
    # Write the structure into a fresh "_run" copy of the template
    newfilename = edit_dft_input_positions(dft_input, structure)

    dft_command = f"{dft_loc} -i {newfilename}"
    if ncpus > 1:
        launcher = "mpirun -np" if mpi == "mpi" else "srun -n"
        dft_command = f"{launcher} {ncpus} {dft_command}"
    # output.write_to_output(dft_command+'\n')

    with open(dft_out, "w+") as fout:
        call(dft_command.split(), stdout=fout)

    # The edited input copy is no longer needed once CP2K has run
    os.remove(newfilename)
    return parse_dft_forces(dft_out)
def run_dft_en_par(
    dft_input: str,
    structure,
    dft_loc: str,
    ncpus: int,
    dft_out: str = "dft.out",
    npool: int = None,
    mpi: str = "mpi",
    **dft_kwargs,
):
    """Run a DFT calculation from the given input template and atomic
    configuration, returning forces and total energy. This function is
    not used atm.

    :param dft_input: input template file name
    :param structure: atomic configuration
    :param dft_loc: relative/absolute executable of the DFT code
    :param ncpus: # of CPU for mpi
    :param dft_out: output file name
    :param npool: not used
    :param mpi: not used
    :param **dft_kwargs: not used
    :return: forces, energy
    """
    # Write the structure into a fresh "_run" copy of the template
    newfilename = edit_dft_input_positions(dft_input, structure)

    serial_command = f"{dft_loc} -i {newfilename} > {dft_out}"
    if ncpus > 1:
        dft_command = f"mpirun -np {ncpus} {serial_command}"
    else:
        dft_command = serial_command
    # output.write_to_output(dft_command+'\n')

    # shell=True is required for the '>' redirection in the command string;
    # all arguments come from local trusted configuration.
    call(dft_command, shell=True)
    os.remove(newfilename)

    return parse_dft_forces_and_energy(dft_out)
# Atomic masses in amu, keyed by element symbol. Hoisted to module level so
# the table is built once instead of on every parse_dft_input call.
_ELE_MASS_AMU = {
    "H": 1.007900,
    "He": 4.002600,
    "Li": 6.941000,
    "Be": 9.012200,
    "B": 10.811000,
    "C": 12.010700,
    "N": 14.006700,
    "O": 15.999400,
    "F": 18.998400,
    "Ne": 20.179700,
    "Na": 22.989700,
    "Mg": 24.305000,
    "Al": 26.981500,
    "Si": 28.085500,
    "P": 30.973800,
    "S": 32.065000,
    "Cl": 35.453000,
    "K": 39.098300,
    "Ar": 39.948000,
    "Ca": 40.078000,
    "Sc": 44.955900,
    "Ti": 47.867000,
    "V": 50.941500,
    "Cr": 51.996100,
    "Mn": 54.938000,
    "Fe": 55.845000,
    "Ni": 58.693400,
    "Co": 58.933200,
    "Cu": 63.546000,
    "Zn": 65.390000,
    "Ga": 69.723000,
    "Ge": 72.640000,
    "As": 74.921600,
    "Se": 78.960000,
    "Br": 79.904000,
    "Kr": 83.800000,
    "Rb": 85.467800,
    "Sr": 87.620000,
    "Y": 88.905900,
    "Zr": 91.224000,
    "Nb": 92.906400,
    "Mo": 95.940000,
    "Tc": 98.000000,
    "Ru": 101.070000,
    "Rh": 102.905500,
    "Pd": 106.420000,
    "Ag": 107.868200,
    "Cd": 112.411000,
    "In": 114.818000,
    "Sn": 118.710000,
    "Sb": 121.760000,
    "I": 126.904500,
    "Te": 127.600000,
    "Xe": 131.293000,
    "Cs": 132.905500,
    "Ba": 137.327000,
    "La": 138.905500,
    "Ce": 140.116000,
    "Pr": 140.907700,
    "Nd": 144.240000,
    "Pm": 145.000000,
    "Sm": 150.360000,
    "Eu": 151.964000,
    "Gd": 157.250000,
    "Tb": 158.925300,
    "Dy": 162.500000,
    "Ho": 164.930300,
    "Er": 167.259000,
    "Tm": 168.934200,
    "Yb": 173.040000,
    "Lu": 174.967000,
    "Hf": 178.490000,
    "Ta": 180.947900,
    "W": 183.840000,
    "Re": 186.207000,
    "Os": 190.230000,
    "Ir": 192.217000,
    "Pt": 195.078000,
    "Au": 196.966500,
    "Hg": 200.590000,
    "Tl": 204.383300,
    "Pb": 207.200000,
    "Bi": 208.980400,
    "Po": 209.000000,
    "At": 210.000000,
    "Rn": 222.000000,
    "Fr": 223.000000,
    "Ra": 226.000000,
    "Ac": 227.000000,
    "Pa": 231.035900,
    "Th": 232.038100,
    "Np": 237.000000,
    "U": 238.028900,
    "Am": 243.000000,
    "Pu": 244.000000,
    "Cm": 247.000000,
    "Bk": 247.000000,
    "Cf": 251.000000,
    "Es": 252.000000,
    "Fm": 257.000000,
    "Md": 258.000000,
    "No": 259.000000,
    "Rf": 261.000000,
    "Lr": 262.000000,
    "Db": 262.000000,
    "Bh": 264.000000,
    "Sg": 266.000000,
    "Mt": 268.000000,
    "Rg": 272.000000,
    "Hs": 277.000000,
}

# see conversions.nb for conversion from amu to md units
_MASS_CONVERT = 0.000103642695727


def parse_dft_input(dft_input: str):
    """Parse a CP2K input file prepared by the user.

    The parser is very limited; the user has to define things in a good
    format. It requires the "CELL" and "COORD" blocks.

    :param dft_input: file name
    :return: positions, species, cell, masses
    """
    positions = []
    species = []
    cell = []

    with open(dft_input) as f:
        lines = f.readlines()

    # Locate the cell and positions blocks in the input file
    cell_index = None
    positions_index = None
    nat = None
    for i, line in enumerate(lines):
        if "&CELL" in line:
            cell_index = int(i + 1)
        elif "COORD" in line and "END" not in line:
            positions_index = int(i + 1)
        elif "&END" in line and (positions_index is not None) and (nat is None):
            # The first &END after the COORD block closes it
            nat = i - positions_index

    assert cell_index is not None, "Failed to find cell in input"
    assert positions_index is not None, "Failed to find positions in input"
    assert nat is not None, "Failed to find number of atoms in input"

    # Load cell: three "A/B/C x y z" lines directly after &CELL
    # TO DO: allow to mess up the order of A, B, and C
    for i in range(cell_index, cell_index + 3):
        cell.append(list(map(float, lines[i].split()[1:])))
    cell = np.array(cell)

    # Check cell IO
    assert len(cell) != 0, "Cell failed to load"
    assert np.shape(cell) == (3, 3), "Cell failed to load correctly"

    # Load positions: "element x y z" lines inside the COORD block
    for i in range(positions_index, positions_index + nat):
        pos_line = lines[i].split()
        species.append(pos_line[0])
        positions.append(list(map(float, pos_line[1:])))

    # Check position IO
    assert positions != [], "Positions failed to load"
    positions = np.array(positions)

    # TO DO: allow customized masses
    masses = {ele: amu * _MASS_CONVERT for ele, amu in _ELE_MASS_AMU.items()}

    return positions, species, cell, masses
def dft_input_to_structure(dft_input: str):
    """
    Parse a CP2K input file and return the atoms it contains as a
    Structure object.

    :param dft_input: input file to parse
    :return: atomic structure
    """
    positions, species, cell, masses = parse_dft_input(dft_input)
    _, coded_species = struc.get_unique_species(species)
    structure = struc.Structure(
        positions=positions,
        species=coded_species,
        cell=cell,
        mass_dict=masses,
        species_labels=species,
    )
    return structure
def edit_dft_input_positions(dft_input: str, structure):
    """Write the current configuration of the OTF structure to the
    CP2K input file.

    :param dft_input: input file name
    :param structure: structure to print
    :type structure: class Structure
    :return newfilename: the name of the edited input file,
        with "_run" suffix
    """
    with open(dft_input, "r") as f:
        lines = f.readlines()

    file_pos_index = None
    cell_index = None
    nat = None
    for i, line in enumerate(lines):
        if "&CELL" in line:
            cell_index = int(i + 1)
        if "&COORD" in line:
            file_pos_index = int(i + 1)
        # Only the first &END after &COORD closes the coordinate block;
        # without the (nat is None) guard a later &END would overwrite nat
        # (this matches the logic in parse_dft_input).
        if "&END" in line and (file_pos_index is not None) and (nat is None):
            nat = i - file_pos_index

    assert file_pos_index is not None, "Failed to find positions in input"
    assert cell_index is not None, "Failed to find cell in input"
    assert nat is not None, "Failed to find nat in input"

    # Overwrite the coordinate lines with the current atomic positions
    for pos_index, line_index in enumerate(
        range(file_pos_index, file_pos_index + structure.nat)
    ):
        pos = structure.positions[pos_index]
        specs = structure.species_labels[pos_index]
        pos_string = f"{specs} {pos[0]} {pos[1]} {pos[2]}\n"
        if line_index < len(lines):
            lines[line_index] = pos_string
        else:
            lines.append(pos_string)

    # Overwrite the A, B, C lattice-vector lines of the &CELL block
    lines[cell_index] = "A " + " ".join([str(x) for x in structure.vec1]) + "\n"
    lines[cell_index + 1] = "B " + " ".join([str(x) for x in structure.vec2]) + "\n"
    lines[cell_index + 2] = "C " + " ".join([str(x) for x in structure.vec3]) + "\n"

    newfilename = dft_input + "_run"
    with open(newfilename, "w") as f:
        for line in lines:
            f.write(line)

    return newfilename
def parse_dft_forces_and_energy(outfile: str):
    """Get forces (eV/A) and total potential energy (eV) from a CP2K
    output file; the input run type must be ENERGY_FORCE.

    :param outfile: str, Path to dft.out file
    :return: list[nparray], List of forces acting on atoms
    :return: float, total potential energy
    """
    forces = []
    total_energy = np.nan

    # State machine over the "ATOMIC FORCES in [a.u.]" table:
    # startforce == -1 -> outside the table; 0/1 -> skipping the two header
    # lines after the banner; >= 2 -> reading one force row per line until
    # the "SUM OF ATOMIC FORCES" line resets the state.
    startforce = -1
    with open(outfile, "r") as outf:
        for line in outf:
            if line.find("FORCE_EVAL") != -1:
                # e.g. "ENERGY| Total FORCE_EVAL ( QS ) energy (a.u.): <E>"
                total_energy = float(line.split()[8])

            if startforce >= 2:
                if line.find("SUM") != -1:
                    startforce = -1
                else:
                    line = line.split()[3:]
                    forces.append(list(map(float, line)))
                    startforce += 1
            elif startforce >= 0:
                startforce += 1
            elif line.find("FORCES") != -1 and line.find("in") != -1:
                startforce = 0

    # BUGFIX: "total_energy != np.nan" is always True (NaN != NaN), so the
    # original check could never fire; use np.isnan instead. Also format the
    # file name into the message instead of concatenating it after a stray {}.
    assert not np.isnan(total_energy), (
        f"dft parser failed to read the file {outfile}. Run failed."
    )

    # Convert from atomic units to eV/Angstrom
    conversion_factor = 25.71104309541616 * 2.0
    forces = np.array(forces) * conversion_factor

    # Hartree -> eV
    total_energy *= 27.2114

    return forces, total_energy
def parse_dft_forces(outfile: str):
    """Get forces from a CP2K output file in eV/A.

    :param outfile: str, Path to dft.out file
    :return: list[nparray], List of forces acting on atoms
    """
    forces, _ = parse_dft_forces_and_energy(outfile)
    return forces
| [
"os.remove",
"flare.struc.Structure",
"flare.struc.get_unique_species",
"numpy.shape",
"numpy.array",
"subprocess.call"
] | [((1840, 1862), 'os.remove', 'os.remove', (['newfilename'], {}), '(newfilename)\n', (1849, 1862), False, 'import os\n'), ((2821, 2850), 'subprocess.call', 'call', (['dft_command'], {'shell': '(True)'}), '(dft_command, shell=True)\n', (2825, 2850), False, 'from subprocess import call\n'), ((2855, 2877), 'os.remove', 'os.remove', (['newfilename'], {}), '(newfilename)\n', (2864, 2877), False, 'import os\n'), ((4395, 4409), 'numpy.array', 'np.array', (['cell'], {}), '(cell)\n', (4403, 4409), True, 'import numpy as np\n'), ((4919, 4938), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (4927, 4938), True, 'import numpy as np\n'), ((8402, 8435), 'flare.struc.get_unique_species', 'struc.get_unique_species', (['species'], {}), '(species)\n', (8426, 8435), False, 'from flare import struc\n'), ((8447, 8563), 'flare.struc.Structure', 'struc.Structure', ([], {'positions': 'positions', 'species': 'coded_species', 'cell': 'cell', 'mass_dict': 'masses', 'species_labels': 'species'}), '(positions=positions, species=coded_species, cell=cell,\n mass_dict=masses, species_labels=species)\n', (8462, 8563), False, 'from flare import struc\n'), ((4491, 4505), 'numpy.shape', 'np.shape', (['cell'], {}), '(cell)\n', (4499, 4505), True, 'import numpy as np\n'), ((12056, 12072), 'numpy.array', 'np.array', (['forces'], {}), '(forces)\n', (12064, 12072), True, 'import numpy as np\n')] |
from collections import namedtuple
import numpy as np
import numpy.random as nr
from ..arraystep import Competence
from .base_hmc import BaseHMC, HMCStepData, DivergenceInfo
from .integration import IntegrationError
from pymc3.backends.report import SamplerWarning, WarningType
from pymc3.theanof import floatX
from pymc3.vartypes import continuous_types
__all__ = ['NUTS']
def logbern(log_p):
    """Draw a Bernoulli sample in log space: True with probability exp(log_p)."""
    if np.isnan(log_p):
        raise FloatingPointError("log_p can't be nan.")
    # Compare log(U) with log_p for U ~ Uniform[0, 1); equivalent to U < exp(log_p).
    draw = nr.uniform()
    return np.log(draw) < log_p
class NUTS(BaseHMC):
    R"""A sampler for continuous variables based on Hamiltonian mechanics.
    NUTS automatically tunes the step size and the number of steps per
    sample. A detailed description can be found at [1], "Algorithm 6:
    Efficient No-U-Turn Sampler with Dual Averaging".
    NUTS provides a number of statistics that can be accessed with
    `trace.get_sampler_stats`:
    - `mean_tree_accept`: The mean acceptance probability for the tree
      that generated this sample. The mean of these values across all
      samples but the burn-in should be approximately `target_accept`
      (the default for this is 0.8).
    - `diverging`: Whether the trajectory for this sample diverged. If
      there are any divergences after burnin, this indicates that
      the results might not be reliable. Reparametrization can
      often help, but you can also try to increase `target_accept` to
      something like 0.9 or 0.95.
    - `energy`: The energy at the point in phase-space where the sample
      was accepted. This can be used to identify posteriors with
      problematically long tails. See below for an example.
    - `energy_change`: The difference in energy between the start and
      the end of the trajectory. For a perfect integrator this would
      always be zero.
    - `max_energy_change`: The maximum difference in energy along the
      whole trajectory.
    - `depth`: The depth of the tree that was used to generate this sample
    - `tree_size`: The number of leafs of the sampling tree, when the
      sample was accepted. This is usually a bit less than
      `2 ** depth`. If the tree size is large, the sampler is
      using a lot of leapfrog steps to find the next sample. This can for
      example happen if there are strong correlations in the posterior,
      if the posterior has long tails, if there are regions of high
      curvature ("funnels"), or if the variance estimates in the mass
      matrix are inaccurate. Reparametrisation of the model or estimating
      the posterior variances from past samples might help.
    - `tune`: This is `True`, if step size adaptation was turned on when
      this sample was generated.
    - `step_size`: The step size used for this sample.
    - `step_size_bar`: The current best known step-size. After the tuning
      samples, the step size is set to this value. This should converge
      during tuning.
    - `model_logp`: The model log-likelihood for this sample.
    References
    ----------
    .. [1] Hoffman, <NAME>., & <NAME>. (2011). The No-U-Turn
       Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo.
    """
    name = 'nuts'

    default_blocked = True
    generates_stats = True
    # dtypes of the per-sample statistics emitted by `_hamiltonian_step`.
    # FIX: the boolean entries used `np.bool`, an alias for the builtin `bool`
    # that was deprecated in NumPy 1.20 and removed in NumPy 1.24; accessing it
    # raises AttributeError on modern NumPy at class-definition time. The
    # builtin `bool` is exactly equivalent.
    stats_dtypes = [{
        'depth': np.int64,
        'step_size': np.float64,
        'tune': bool,
        'mean_tree_accept': np.float64,
        'step_size_bar': np.float64,
        'tree_size': np.float64,
        'diverging': bool,
        'energy_error': np.float64,
        'energy': np.float64,
        'max_energy_error': np.float64,
        'model_logp': np.float64,
    }]

    def __init__(self, vars=None, max_treedepth=10, early_max_treedepth=8,
                 **kwargs):
        R"""Set up the No-U-Turn sampler.
        Parameters
        ----------
        vars : list of Theano variables, default all continuous vars
        Emax : float, default 1000
            Maximum energy change allowed during leapfrog steps. Larger
            deviations will abort the integration.
        target_accept : float, default .8
            Adapt the step size such that the average acceptance
            probability across the trajectories are close to target_accept.
            Higher values for target_accept lead to smaller step sizes.
            Setting this to higher values like 0.9 or 0.99 can help
            with sampling from difficult posteriors. Valid values are
            between 0 and 1 (exclusive).
        step_scale : float, default 0.25
            Size of steps to take, automatically scaled down by `1/n**(1/4)`.
            If step size adaptation is switched off, the resulting step size
            is used. If adaptation is enabled, it is used as initial guess.
        gamma : float, default .05
        k : float, default .75
            Parameter for dual averaging for step size adaptation. Values
            between 0.5 and 1 (exclusive) are admissible. Higher values
            correspond to slower adaptation.
        t0 : int, default 10
            Parameter for dual averaging. Higher values slow initial
            adaptation.
        adapt_step_size : bool, default=True
            Whether step size adaptation should be enabled. If this is
            disabled, `k`, `t0`, `gamma` and `target_accept` are ignored.
        max_treedepth : int, default=10
            The maximum tree depth. Trajectories are stopped when this
            depth is reached.
        early_max_treedepth : int, default=8
            The maximum tree depth during the first 200 tuning samples.
        scaling : array_like, ndim = {1,2}
            The inverse mass, or precision matrix. One dimensional arrays are
            interpreted as diagonal matrices. If `is_cov` is set to True,
            this will be interpreded as the mass or covariance matrix.
        is_cov : bool, default=False
            Treat the scaling as mass or covariance matrix.
        potential : Potential, optional
            An object that represents the Hamiltonian with methods `velocity`,
            `energy`, and `random` methods. It can be specified instead
            of the scaling matrix.
        model : pymc3.Model
            The model
        kwargs: passed to BaseHMC
        Notes
        -----
        The step size adaptation stops when `self.tune` is set to False.
        This is usually achieved by setting the `tune` parameter if
        `pm.sample` to the desired number of tuning steps.
        """
        super().__init__(vars, **kwargs)

        self.max_treedepth = max_treedepth
        self.early_max_treedepth = early_max_treedepth
        # Counter for post-tuning samples that exhausted the tree depth budget;
        # reported by `warnings()` when the fraction exceeds 5%.
        self._reached_max_treedepth = 0

    def _hamiltonian_step(self, start, p0, step_size):
        # Use the shallower depth budget early in tuning (first 200 iterations).
        if self.tune and self.iter_count < 200:
            max_treedepth = self.early_max_treedepth
        else:
            max_treedepth = self.max_treedepth

        tree = _Tree(len(p0), self.integrator, start, step_size, self.Emax)

        for _ in range(max_treedepth):
            # Pick the growth direction uniformly: logbern returns a bool,
            # so this is +1 or -1 with equal probability.
            direction = logbern(np.log(0.5)) * 2 - 1
            divergence_info, turning = tree.extend(direction)

            if divergence_info or turning:
                break
        else:
            # Loop exhausted without diverging/turning: record the depth-out.
            if not self.tune:
                self._reached_max_treedepth += 1

        stats = tree.stats()
        accept_stat = stats['mean_tree_accept']
        return HMCStepData(tree.proposal, accept_stat, divergence_info, stats)

    @staticmethod
    def competence(var, has_grad):
        """Check how appropriate this class is for sampling a random variable."""
        if var.dtype in continuous_types and has_grad:
            return Competence.IDEAL
        return Competence.INCOMPATIBLE

    def warnings(self):
        warnings = super().warnings()
        n_samples = self._samples_after_tune
        n_treedepth = self._reached_max_treedepth

        if n_samples > 0 and n_treedepth / float(n_samples) > 0.05:
            msg = ('The chain reached the maximum tree depth. Increase '
                   'max_treedepth, increase target_accept or reparameterize.')
            warn = SamplerWarning(WarningType.TREEDEPTH, msg, 'warn',
                                  None, None, None)
            warnings.append(warn)
        return warnings
# A proposal for the next position: position q, its gradient, the energy at
# that point, the acceptance probability of the step, and the model log-prob.
Proposal = namedtuple("Proposal", "q, q_grad, energy, p_accept, logp")
# A subtree of the binary tree built by nuts: outermost states (left/right),
# summed momentum, the current proposal, log of the subtree weight, and the
# accumulated acceptance statistics.
Subtree = namedtuple(
    "Subtree",
    "left, right, p_sum, proposal, log_size, accept_sum, n_proposals")
class _Tree:
    def __init__(self, ndim, integrator, start, step_size, Emax):
        """Binary tree from the NUTS algorithm.
        Parameters
        ----------
        ndim : int
            Dimensionality of the position/momentum space.
        integrator : object
            Performs single leapfrog steps via its ``step`` method.
        start : integration.State
            The starting point of the trajectory.
        step_size : float
            The step size to use in this tree
        Emax : float
            The maximum energy change to accept before aborting the
            transition as diverging.
        """
        self.ndim = ndim
        self.integrator = integrator
        self.start = start
        self.step_size = step_size
        self.Emax = Emax
        self.start_energy = np.array(start.energy)

        # The tree initially consists of just the starting state.
        self.left = self.right = start
        self.proposal = Proposal(
            start.q, start.q_grad, start.energy, 1.0, start.model_logp)
        self.depth = 0
        self.log_size = 0  # log of the total multinomial weight of the tree
        self.accept_sum = 0
        self.n_proposals = 0
        self.p_sum = start.p.copy()  # running momentum sum for the U-turn check
        self.max_energy_change = 0

    def extend(self, direction):
        """Double the treesize by extending the tree in the given direction.
        If direction is larger than 0, extend it to the right, otherwise
        extend it to the left.
        Return a tuple `(diverging, turning)` of type (DivergenceInfo, bool).
        `diverging` indicates, that the tree extension was aborted because
        the energy change exceeded `self.Emax`. `turning` indicates that
        the tree extension was stopped because the termination criterion
        was reached (the trajectory is turning back).
        """
        if direction > 0:
            tree, diverging, turning = self._build_subtree(
                self.right, self.depth, floatX(np.asarray(self.step_size)))
            # Bookkeeping of the outermost states for the extra U-turn checks below.
            leftmost_begin, leftmost_end = self.left, self.right
            rightmost_begin, rightmost_end = tree.left, tree.right
            leftmost_p_sum = self.p_sum
            rightmost_p_sum = tree.p_sum
            self.right = tree.right
        else:
            # Negative step size integrates the trajectory backwards in time.
            tree, diverging, turning = self._build_subtree(
                self.left, self.depth, floatX(np.asarray(-self.step_size)))
            leftmost_begin, leftmost_end = tree.right, tree.left
            rightmost_begin, rightmost_end = self.left, self.right
            leftmost_p_sum = tree.p_sum
            rightmost_p_sum = self.p_sum
            # NOTE(review): `tree.right` is assigned on the leftward extension as
            # well — presumably the backwards-built subtree stores its outermost
            # point in `right`; matches upstream pymc3, confirm before changing.
            self.left = tree.right

        self.depth += 1
        self.accept_sum += tree.accept_sum
        self.n_proposals += tree.n_proposals

        if diverging or turning:
            return diverging, turning

        # Multinomial sampling between the old tree and the new subtree.
        size1, size2 = self.log_size, tree.log_size
        if logbern(size2 - size1):
            self.proposal = tree.proposal

        self.log_size = np.logaddexp(self.log_size, tree.log_size)
        self.p_sum[:] += tree.p_sum

        # Additional turning check only when tree depth > 0 to avoid redundant work
        if self.depth > 0:
            left, right = self.left, self.right
            p_sum = self.p_sum
            # Generalized U-turn criterion on the full tree plus two
            # overlapping spans (helps catch turns that straddle the join).
            turning = (p_sum.dot(left.v) <= 0) or (p_sum.dot(right.v) <= 0)
            p_sum1 = leftmost_p_sum + rightmost_begin.p
            turning1 = (p_sum1.dot(leftmost_begin.v) <= 0) or (p_sum1.dot(rightmost_begin.v) <= 0)
            p_sum2 = leftmost_end.p + rightmost_p_sum
            turning2 = (p_sum2.dot(leftmost_end.v) <= 0) or (p_sum2.dot(rightmost_end.v) <= 0)
            turning = (turning | turning1 | turning2)

        return diverging, turning

    def _single_step(self, left, epsilon):
        """Perform a leapfrog step and handle error cases."""
        try:
            right = self.integrator.step(epsilon, left)
        except IntegrationError as err:
            error_msg = str(err)
            error = err
        else:
            energy_change = right.energy - self.start_energy
            if np.isnan(energy_change):
                energy_change = np.inf

            # Track the largest energy deviation seen anywhere on the trajectory.
            if np.abs(energy_change) > np.abs(self.max_energy_change):
                self.max_energy_change = energy_change
            if np.abs(energy_change) < self.Emax:
                p_accept = min(1, np.exp(-energy_change))
                log_size = -energy_change
                proposal = Proposal(
                    right.q, right.q_grad, right.energy, p_accept, right.model_logp)
                tree = Subtree(right, right, right.p,
                               proposal, log_size, p_accept, 1)
                return tree, None, False
            else:
                # Energy blew past Emax: treat as a divergence (error is None).
                error_msg = ("Energy change in leapfrog step is too large: %s."
                             % energy_change)
                error = None
        # Reached only on divergence: emit an empty subtree with -inf weight.
        tree = Subtree(None, None, None, None, -np.inf, 0, 1)
        divergance_info = DivergenceInfo(error_msg, error, left)
        return tree, divergance_info, False

    def _build_subtree(self, left, depth, epsilon):
        # Base case: a subtree of depth 0 is a single leapfrog step.
        if depth == 0:
            return self._single_step(left, epsilon)

        # Recursively build two subtrees of depth - 1 and join them.
        tree1, diverging, turning = self._build_subtree(
            left, depth - 1, epsilon)
        if diverging or turning:
            return tree1, diverging, turning

        tree2, diverging, turning = self._build_subtree(
            tree1.right, depth - 1, epsilon)

        left, right = tree1.left, tree2.right

        if not (diverging or turning):
            p_sum = tree1.p_sum + tree2.p_sum
            turning = (p_sum.dot(left.v) <= 0) or (p_sum.dot(right.v) <= 0)
            # Additional U turn check only when depth > 1 to avoid redundant work.
            if depth - 1 > 0:
                p_sum1 = tree1.p_sum + tree2.left.p
                turning1 = (p_sum1.dot(tree1.left.v) <= 0) or (p_sum1.dot(tree2.left.v) <= 0)
                p_sum2 = tree1.right.p + tree2.p_sum
                turning2 = (p_sum2.dot(tree1.right.v) <= 0) or (p_sum2.dot(tree2.right.v) <= 0)
                turning = (turning | turning1 | turning2)

            # Multinomial choice between the two halves, weighted by size.
            log_size = np.logaddexp(tree1.log_size, tree2.log_size)
            if logbern(tree2.log_size - log_size):
                proposal = tree2.proposal
            else:
                proposal = tree1.proposal
        else:
            p_sum = tree1.p_sum
            log_size = tree1.log_size
            proposal = tree1.proposal

        accept_sum = tree1.accept_sum + tree2.accept_sum
        n_proposals = tree1.n_proposals + tree2.n_proposals

        tree = Subtree(left, right, p_sum, proposal,
                       log_size, accept_sum, n_proposals)
        return tree, diverging, turning

    def stats(self):
        # Summary statistics of the finished tree, consumed by NUTS.stats_dtypes.
        return {
            'depth': self.depth,
            'mean_tree_accept': self.accept_sum / self.n_proposals,
            'energy_error': self.proposal.energy - self.start.energy,
            'energy': self.proposal.energy,
            'tree_size': self.n_proposals,
            'max_energy_error': self.max_energy_change,
            'model_logp': self.proposal.logp,
        }
| [
"numpy.random.uniform",
"numpy.abs",
"numpy.log",
"numpy.asarray",
"numpy.isnan",
"numpy.logaddexp",
"numpy.array",
"collections.namedtuple",
"pymc3.backends.report.SamplerWarning",
"numpy.exp"
] | [((8313, 8372), 'collections.namedtuple', 'namedtuple', (['"""Proposal"""', '"""q, q_grad, energy, p_accept, logp"""'], {}), "('Proposal', 'q, q_grad, energy, p_accept, logp')\n", (8323, 8372), False, 'from collections import namedtuple\n'), ((8430, 8522), 'collections.namedtuple', 'namedtuple', (['"""Subtree"""', '"""left, right, p_sum, proposal, log_size, accept_sum, n_proposals"""'], {}), "('Subtree',\n 'left, right, p_sum, proposal, log_size, accept_sum, n_proposals')\n", (8440, 8522), False, 'from collections import namedtuple\n'), ((406, 421), 'numpy.isnan', 'np.isnan', (['log_p'], {}), '(log_p)\n', (414, 421), True, 'import numpy as np\n'), ((9256, 9278), 'numpy.array', 'np.array', (['start.energy'], {}), '(start.energy)\n', (9264, 9278), True, 'import numpy as np\n'), ((11333, 11375), 'numpy.logaddexp', 'np.logaddexp', (['self.log_size', 'tree.log_size'], {}), '(self.log_size, tree.log_size)\n', (11345, 11375), True, 'import numpy as np\n'), ((497, 509), 'numpy.random.uniform', 'nr.uniform', ([], {}), '()\n', (507, 509), True, 'import numpy.random as nr\n'), ((8104, 8172), 'pymc3.backends.report.SamplerWarning', 'SamplerWarning', (['WarningType.TREEDEPTH', 'msg', '"""warn"""', 'None', 'None', 'None'], {}), "(WarningType.TREEDEPTH, msg, 'warn', None, None, None)\n", (8118, 8172), False, 'from pymc3.backends.report import SamplerWarning, WarningType\n'), ((12434, 12457), 'numpy.isnan', 'np.isnan', (['energy_change'], {}), '(energy_change)\n', (12442, 12457), True, 'import numpy as np\n'), ((14504, 14548), 'numpy.logaddexp', 'np.logaddexp', (['tree1.log_size', 'tree2.log_size'], {}), '(tree1.log_size, tree2.log_size)\n', (14516, 14548), True, 'import numpy as np\n'), ((12514, 12535), 'numpy.abs', 'np.abs', (['energy_change'], {}), '(energy_change)\n', (12520, 12535), True, 'import numpy as np\n'), ((12538, 12568), 'numpy.abs', 'np.abs', (['self.max_energy_change'], {}), '(self.max_energy_change)\n', (12544, 12568), True, 'import numpy as np\n'), ((12640, 
12661), 'numpy.abs', 'np.abs', (['energy_change'], {}), '(energy_change)\n', (12646, 12661), True, 'import numpy as np\n'), ((10317, 10343), 'numpy.asarray', 'np.asarray', (['self.step_size'], {}), '(self.step_size)\n', (10327, 10343), True, 'import numpy as np\n'), ((10715, 10742), 'numpy.asarray', 'np.asarray', (['(-self.step_size)'], {}), '(-self.step_size)\n', (10725, 10742), True, 'import numpy as np\n'), ((12709, 12731), 'numpy.exp', 'np.exp', (['(-energy_change)'], {}), '(-energy_change)\n', (12715, 12731), True, 'import numpy as np\n'), ((7041, 7052), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (7047, 7052), True, 'import numpy as np\n')] |
#! user/bin/python3
import numpy as np
class Bfunc:
    """Evaluate a named field solution at a point relative to a source origin.

    The solution is selected by name at construction time ('line' or 'loop');
    calling the instance shifts ``r`` by ``r0`` and applies the solution.
    """

    def __init__(self, sol):
        # Resolve the named solution to the matching static method.
        self.func = getattr(self, sol)

    def __call__(self, r, r0, *args, **kwargs):
        return self.transform(r, r0, self.func, *args, **kwargs)

    def transform(self, r, r0, func, *args, **kwargs):
        # Work in coordinates relative to the source position r0.
        offset = np.array(r) - np.array(r0)
        return func(offset, *args, **kwargs)

    @staticmethod
    def line(r, L, I=1):
        # Scales linearly with current I and length L.
        return I * L * r

    @staticmethod
    def loop(r, R, I=1):
        # Scales with current I and the squared loop radius R.
        return I * R * R * r
if __name__ == '__main__':
    # Smoke test: evaluate both named solutions at the same offset point.
    point = [1, 1, 1]
    origin = [0.5, 0.5, 0.5]
    L = 2
    R = 2
    print(Bfunc('loop')(point, origin, R, I=1))
    print(Bfunc('line')(point, origin, L=L, I=1))
| [
"numpy.array"
] | [((303, 314), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (311, 314), True, 'import numpy as np\n'), ((328, 340), 'numpy.array', 'np.array', (['r0'], {}), '(r0)\n', (336, 340), True, 'import numpy as np\n')] |
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch import nn
from tianshou.utils.net.discrete import NoisyLinear
class DQN(nn.Module):
    """Reference: Human-level control through deep reinforcement learning.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(
        self,
        c: int,
        h: int,
        w: int,
        action_shape: Sequence[int],
        device: Union[str, int, torch.device] = "cpu",
        features_only: bool = False,
        output_dim: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.device = device
        # Nature-DQN convolutional trunk ending in a flat feature vector.
        trunk = [
            nn.Conv2d(c, 32, kernel_size=8, stride=4), nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(inplace=True),
            nn.Flatten(),
        ]
        self.net = nn.Sequential(*trunk)
        # Probe the trunk with a dummy batch to discover the flat feature size.
        with torch.no_grad():
            probe = torch.zeros(1, c, h, w)
            self.output_dim = np.prod(self.net(probe).shape[1:])
        if not features_only:
            # Append the Q-value head mapping features -> one value per action.
            self.net = nn.Sequential(
                self.net, nn.Linear(self.output_dim, 512), nn.ReLU(inplace=True),
                nn.Linear(512, np.prod(action_shape))
            )
            self.output_dim = np.prod(action_shape)
        elif output_dim is not None:
            # Feature extractor with an explicit projection size.
            self.net = nn.Sequential(
                self.net, nn.Linear(self.output_dim, output_dim),
                nn.ReLU(inplace=True)
            )
            self.output_dim = output_dim

    def forward(
        self,
        obs: Union[np.ndarray, torch.Tensor],
        state: Optional[Any] = None,
        info: Dict[str, Any] = {},
    ) -> Tuple[torch.Tensor, Any]:
        r"""Mapping: s -> Q(s, \*)."""
        batch = torch.as_tensor(obs, device=self.device, dtype=torch.float32)
        return self.net(batch), state
class C51(DQN):
    """Reference: A distributional perspective on reinforcement learning.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(
        self,
        c: int,
        h: int,
        w: int,
        action_shape: Sequence[int],
        num_atoms: int = 51,
        device: Union[str, int, torch.device] = "cpu",
    ) -> None:
        # The base DQN head emits one logit per (action, atom) pair.
        self.action_num = np.prod(action_shape)
        super().__init__(c, h, w, [self.action_num * num_atoms], device)
        self.num_atoms = num_atoms

    def forward(
        self,
        obs: Union[np.ndarray, torch.Tensor],
        state: Optional[Any] = None,
        info: Dict[str, Any] = {},
    ) -> Tuple[torch.Tensor, Any]:
        r"""Mapping: x -> Z(x, \*)."""
        logits, state = super().forward(obs)
        # Normalize the atoms of each action into a probability distribution.
        probs = logits.view(-1, self.num_atoms).softmax(dim=-1)
        return probs.view(-1, self.action_num, self.num_atoms), state
class Rainbow(DQN):
    """Reference: Rainbow: Combining Improvements in Deep Reinforcement Learning.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(
        self,
        c: int,
        h: int,
        w: int,
        action_shape: Sequence[int],
        num_atoms: int = 51,
        noisy_std: float = 0.5,
        device: Union[str, int, torch.device] = "cpu",
        is_dueling: bool = True,
        is_noisy: bool = True,
    ) -> None:
        super().__init__(c, h, w, action_shape, device, features_only=True)
        self.action_num = np.prod(action_shape)
        self.num_atoms = num_atoms

        def linear(x, y):
            # Noisy layers provide parameter-space exploration; otherwise
            # fall back to a plain fully connected layer.
            return NoisyLinear(x, y, noisy_std) if is_noisy else nn.Linear(x, y)

        # Advantage (Q) stream: one output per (action, atom) pair.
        self.Q = nn.Sequential(
            linear(self.output_dim, 512), nn.ReLU(inplace=True),
            linear(512, self.action_num * self.num_atoms)
        )
        self._is_dueling = is_dueling
        if self._is_dueling:
            # Value stream shared across actions (dueling architecture).
            self.V = nn.Sequential(
                linear(self.output_dim, 512), nn.ReLU(inplace=True),
                linear(512, self.num_atoms)
            )
        self.output_dim = self.action_num * self.num_atoms

    def forward(
        self,
        obs: Union[np.ndarray, torch.Tensor],
        state: Optional[Any] = None,
        info: Dict[str, Any] = {},
    ) -> Tuple[torch.Tensor, Any]:
        r"""Mapping: x -> Z(x, \*)."""
        features, state = super().forward(obs)
        q = self.Q(features).view(-1, self.action_num, self.num_atoms)
        if self._is_dueling:
            v = self.V(features).view(-1, 1, self.num_atoms)
            # Dueling combination with mean-centered advantages.
            logits = q - q.mean(dim=1, keepdim=True) + v
        else:
            logits = q
        return logits.softmax(dim=2), state
class QRDQN(DQN):
    """Reference: Distributional Reinforcement Learning with Quantile \
    Regression.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(
        self,
        c: int,
        h: int,
        w: int,
        action_shape: Sequence[int],
        num_quantiles: int = 200,
        device: Union[str, int, torch.device] = "cpu",
    ) -> None:
        # The base DQN head emits one output per (action, quantile) pair.
        self.action_num = np.prod(action_shape)
        super().__init__(c, h, w, [self.action_num * num_quantiles], device)
        self.num_quantiles = num_quantiles

    def forward(
        self,
        obs: Union[np.ndarray, torch.Tensor],
        state: Optional[Any] = None,
        info: Dict[str, Any] = {},
    ) -> Tuple[torch.Tensor, Any]:
        r"""Mapping: x -> Z(x, \*)."""
        flat, state = super().forward(obs)
        quantiles = flat.view(-1, self.action_num, self.num_quantiles)
        return quantiles, state
"torch.nn.ReLU",
"torch.nn.Conv2d",
"tianshou.utils.net.discrete.NoisyLinear",
"torch.nn.Linear",
"torch.zeros",
"torch.as_tensor",
"torch.no_grad",
"numpy.prod",
"torch.nn.Flatten"
] | [((1861, 1922), 'torch.as_tensor', 'torch.as_tensor', (['obs'], {'device': 'self.device', 'dtype': 'torch.float32'}), '(obs, device=self.device, dtype=torch.float32)\n', (1876, 1922), False, 'import torch\n'), ((2403, 2424), 'numpy.prod', 'np.prod', (['action_shape'], {}), '(action_shape)\n', (2410, 2424), True, 'import numpy as np\n'), ((3572, 3593), 'numpy.prod', 'np.prod', (['action_shape'], {}), '(action_shape)\n', (3579, 3593), True, 'import numpy as np\n'), ((5319, 5340), 'numpy.prod', 'np.prod', (['action_shape'], {}), '(action_shape)\n', (5326, 5340), True, 'import numpy as np\n'), ((747, 788), 'torch.nn.Conv2d', 'nn.Conv2d', (['c', '(32)'], {'kernel_size': '(8)', 'stride': '(4)'}), '(c, 32, kernel_size=8, stride=4)\n', (756, 788), False, 'from torch import nn\n'), ((790, 811), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (797, 811), False, 'from torch import nn\n'), ((825, 867), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(4)', 'stride': '(2)'}), '(32, 64, kernel_size=4, stride=2)\n', (834, 867), False, 'from torch import nn\n'), ((869, 890), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (876, 890), False, 'from torch import nn\n'), ((904, 946), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)'}), '(64, 64, kernel_size=3, stride=1)\n', (913, 946), False, 'from torch import nn\n'), ((948, 969), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (955, 969), False, 'from torch import nn\n'), ((983, 995), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (993, 995), False, 'from torch import nn\n'), ((1019, 1034), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1032, 1034), False, 'import torch\n'), ((1367, 1388), 'numpy.prod', 'np.prod', (['action_shape'], {}), '(action_shape)\n', (1374, 1388), True, 'import numpy as np\n'), ((3865, 3886), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), 
'(inplace=True)\n', (3872, 3886), False, 'from torch import nn\n'), ((1213, 1244), 'torch.nn.Linear', 'nn.Linear', (['self.output_dim', '(512)'], {}), '(self.output_dim, 512)\n', (1222, 1244), False, 'from torch import nn\n'), ((1246, 1267), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1253, 1267), False, 'from torch import nn\n'), ((3704, 3732), 'tianshou.utils.net.discrete.NoisyLinear', 'NoisyLinear', (['x', 'y', 'noisy_std'], {}), '(x, y, noisy_std)\n', (3715, 3732), False, 'from tianshou.utils.net.discrete import NoisyLinear\n'), ((3774, 3789), 'torch.nn.Linear', 'nn.Linear', (['x', 'y'], {}), '(x, y)\n', (3783, 3789), False, 'from torch import nn\n'), ((4105, 4126), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4112, 4126), False, 'from torch import nn\n'), ((1300, 1321), 'numpy.prod', 'np.prod', (['action_shape'], {}), '(action_shape)\n', (1307, 1321), True, 'import numpy as np\n'), ((1490, 1528), 'torch.nn.Linear', 'nn.Linear', (['self.output_dim', 'output_dim'], {}), '(self.output_dim, output_dim)\n', (1499, 1528), False, 'from torch import nn\n'), ((1546, 1567), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1553, 1567), False, 'from torch import nn\n'), ((1083, 1106), 'torch.zeros', 'torch.zeros', (['(1)', 'c', 'h', 'w'], {}), '(1, c, h, w)\n', (1094, 1106), False, 'import torch\n')] |
#! /usr/bin/env python
"""
matrixDFT exercising driver for Fourier Optics 2017 class
<EMAIL> 2018
"""
import sys, os, time
import numpy as np
from astropy.io import fits
import poppy.matrixDFT as matrixDFT
def makedisk(s, ctr=None, radius=None):
    """Return an s x s array holding a filled circle of the given radius."""
    # A disk is an ellipse with equal semi-axes and zero rotation.
    return make_ellipse(s, ctr=ctr, ellpars=(radius, radius, 0.0))
def makegauss(s, ctr=None, sigma=None):
    """Return an s x s array sampling a circular Gaussian centered at ctr.

    NOTE(review): `sigma` divides the squared coordinates directly, so it
    plays the role of sigma**2 (a variance) -- confirm against callers.
    """
    # Default center: pixel center for odd s, pixel corner for even s.
    if ctr is None:
        ctr = (s / 2.0, s / 2.0)
    cx, cy = ctr
    x_axis = np.linspace(-cx + 0.5, s - cx - 0.5, s)
    y_axis = np.linspace(-cy + 0.5, s - cy - 0.5, s)
    x, y = np.meshgrid(x_axis, y_axis.T)
    return np.exp(-0.5 * x * x / sigma - 0.5 * y * y / sigma)
def make_ellipse(s, ctr=None, ellpars=None):
    """Return an s x s binary mask of a (possibly rotated) filled ellipse.

    s = integer, number of pixels on a side of square array
    ellpars = (semimajor, semiminor, rotation in degrees)
    s = 20: default ctr is [10.5,10.5] in DS9, i.e. a pixel corner (even case)
    s = 21: [11.0, 11.0] in DS9, i.e. central pixel is center of ellipse
    semimajor is horiz in DS9
    rot = 30: CCW rotation, semimajor points to 2 o'clock
    """
    # Default center: pixel center for odd s, pixel corner for even s.
    if ctr is None:
        ctr = (s / 2.0, s / 2.0)
    x_axis = np.linspace(-ctr[0] + 0.5, s - ctr[0] - 0.5, s)
    y_axis = np.linspace(-ctr[1] + 0.5, s - ctr[1] - 0.5, s)
    x, y = np.meshgrid(x_axis, y_axis.T)
    # Minus sign kept from the original implementation; the author's own
    # comment questioned it, so it is preserved here unchanged.
    deg = -np.pi / 180.0
    semimajor, semiminor, theta = ellpars
    angle = theta / deg
    c, sn = np.cos(angle), np.sin(angle)
    # Rotated-ellipse quadratic form; points with esq < 1 are inside.
    esq = (x * c - y * sn) ** 2 / semimajor ** 2 + \
          (y * c + x * sn) ** 2 / semiminor ** 2
    mask = np.zeros((s, s))
    mask[esq < 1] = 1
    return mask
def example():
    """
    Generate Point Spread Functions (PSFs) of perfect circular
    unobstructed telescope using a Matrix Fourier Transform (not an FFT).
    <EMAIL>
    """
    # create output directory if it does not exist
    pathname = os.path.dirname(".")
    fullPath = os.path.abspath(pathname)
    odir = fullPath + '/_testMFT_odir'
    if not os.path.exists(odir):
        os.makedirs(odir)
    # instantiate an mft object:
    ft = matrixDFT.MatrixFourierTransform()
    #create a pupil array & write to fits file
    # radius fills the array edge-to-edge (narray is one pixel wider than 2*radius)
    narray = 101
    radius = 50
    pupil = makedisk(narray, radius=radius)
    fits.PrimaryHDU(data=pupil).writeto(odir+"/pup.fits", overwrite=True)
    # create a point source's image
    # calculate the complex amplitude and write the intensity (PSF) file out
    fov_reselt = 9 # field of view, units of lam/D
    pixperreselt = 5 # number of pixels per lambda/D resolution element
    npix = int(fov_reselt * pixperreselt)
    imagefield = ft.perform(pupil, fov_reselt, npix)
    # Intensity is |E|^2; taking .real drops the (zero) imaginary residue.
    image_intensity = (imagefield*imagefield.conj()).real
    psf = image_intensity / image_intensity.max() # peak intensity unity
    # write a fits file of PSF, with the sampling parameters in the header
    hdu = fits.PrimaryHDU( )
    hdu.header['fov']= (fov_reselt, 'field of view in lam/D')
    hdu.header['pixelscl']= (pixperreselt, 'nr of image samples per lam/D')
    hdu.header['normaliz']= ('Unity peak', 'PSF normalization method')
    hdu.header['filter']= ('Monochromatic', 'bandpass name')
    hdu.header['src']= ('testMFT.py', 'anand0xff')
    hdu.data = psf.astype(np.float32) # peak intensity unity
    hdu.writeto(odir+'/psf_{}.fits'.format(pixperreselt), overwrite = True)
if __name__ == "__main__":
    # Run the PSF demo when executed as a script.
    example()
| [
"os.path.abspath",
"numpy.meshgrid",
"os.makedirs",
"poppy.matrixDFT.MatrixFourierTransform",
"os.path.dirname",
"astropy.io.fits.PrimaryHDU",
"numpy.zeros",
"os.path.exists",
"numpy.sin",
"numpy.exp",
"numpy.linspace",
"numpy.cos"
] | [((509, 556), 'numpy.linspace', 'np.linspace', (['(-ctr[0] + 0.5)', '(s - ctr[0] - 0.5)', 's'], {}), '(-ctr[0] + 0.5, s - ctr[0] - 0.5, s)\n', (520, 556), True, 'import numpy as np\n'), ((561, 608), 'numpy.linspace', 'np.linspace', (['(-ctr[1] + 0.5)', '(s - ctr[1] - 0.5)', 's'], {}), '(-ctr[1] + 0.5, s - ctr[1] - 0.5, s)\n', (572, 608), True, 'import numpy as np\n'), ((615, 636), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy.T'], {}), '(xx, yy.T)\n', (626, 636), True, 'import numpy as np\n'), ((649, 699), 'numpy.exp', 'np.exp', (['(-0.5 * x * x / sigma - 0.5 * y * y / sigma)'], {}), '(-0.5 * x * x / sigma - 0.5 * y * y / sigma)\n', (655, 699), True, 'import numpy as np\n'), ((1371, 1418), 'numpy.linspace', 'np.linspace', (['(-ctr[0] + 0.5)', '(s - ctr[0] - 0.5)', 's'], {}), '(-ctr[0] + 0.5, s - ctr[0] - 0.5, s)\n', (1382, 1418), True, 'import numpy as np\n'), ((1423, 1470), 'numpy.linspace', 'np.linspace', (['(-ctr[1] + 0.5)', '(s - ctr[1] - 0.5)', 's'], {}), '(-ctr[1] + 0.5, s - ctr[1] - 0.5, s)\n', (1434, 1470), True, 'import numpy as np\n'), ((1477, 1498), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy.T'], {}), '(xx, yy.T)\n', (1488, 1498), True, 'import numpy as np\n'), ((1775, 1791), 'numpy.zeros', 'np.zeros', (['(s, s)'], {}), '((s, s))\n', (1783, 1791), True, 'import numpy as np\n'), ((2078, 2098), 'os.path.dirname', 'os.path.dirname', (['"""."""'], {}), "('.')\n", (2093, 2098), False, 'import sys, os, time\n'), ((2114, 2139), 'os.path.abspath', 'os.path.abspath', (['pathname'], {}), '(pathname)\n', (2129, 2139), False, 'import sys, os, time\n'), ((2281, 2315), 'poppy.matrixDFT.MatrixFourierTransform', 'matrixDFT.MatrixFourierTransform', ([], {}), '()\n', (2313, 2315), True, 'import poppy.matrixDFT as matrixDFT\n'), ((3023, 3040), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (3038, 3040), False, 'from astropy.io import fits\n'), ((2190, 2210), 'os.path.exists', 'os.path.exists', (['odir'], {}), '(odir)\n', (2204, 2210), False, 
'import sys, os, time\n'), ((2220, 2237), 'os.makedirs', 'os.makedirs', (['odir'], {}), '(odir)\n', (2231, 2237), False, 'import sys, os, time\n'), ((2446, 2473), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'pupil'}), '(data=pupil)\n', (2461, 2473), False, 'from astropy.io import fits\n'), ((1628, 1647), 'numpy.cos', 'np.cos', (['(theta / deg)'], {}), '(theta / deg)\n', (1634, 1647), True, 'import numpy as np\n'), ((1650, 1669), 'numpy.sin', 'np.sin', (['(theta / deg)'], {}), '(theta / deg)\n', (1656, 1669), True, 'import numpy as np\n'), ((1704, 1723), 'numpy.cos', 'np.cos', (['(theta / deg)'], {}), '(theta / deg)\n', (1710, 1723), True, 'import numpy as np\n'), ((1726, 1745), 'numpy.sin', 'np.sin', (['(theta / deg)'], {}), '(theta / deg)\n', (1732, 1745), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import copy
from math import inf
import tempfile
from typing import Any, Dict, Type, cast
import unittest
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import fairscale.optim as optim
from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu
# Prefer NCCL when CUDA is available; Gloo works on CPU-only hosts.
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore
# NOTE(review): DEVICE is a str ("cuda") in one branch and a torch.device in
# the other; both are accepted by the torch APIs used below.
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
RECIPIENT_RANK = 1
# Feature-detect the native broadcast helper (added in newer torch releases);
# fall back to fairscale's implementation on older versions.
try:
    from torch.distributed import broadcast_object_list # noqa
    _torch_broadcast_object = True
except ImportError:
    from fairscale.optim.utils import broadcast_object # noqa
    _torch_broadcast_object = False
def dist_init(rank, world_size, tempfile_name, backend=BACKEND):
    """Join the process group using a shared file as the rendezvous point.

    Parameters
    ----------
    rank : int
        This worker's rank within the group.
    world_size : int
        Total number of workers in the group.
    tempfile_name : str
        Path to a file visible to all workers, used for file:// init.
    backend :
        torch.distributed backend; defaults to the module-level BACKEND.
    """
    rendezvous = "file://" + tempfile_name
    dist.init_process_group(init_method=rendezvous, backend=backend, rank=rank, world_size=world_size)
def sync_object_ranks(something_to_sync: Any, reference_rank: int, device: torch.device) -> Any:
if _torch_broadcast_object:
package = [something_to_sync]
dist.broadcast_object_list(package, src=reference_rank, group=dist.group.WORLD)
package_sync = package[0]
else:
package_sync = optim.utils.broadcast_object(
something_to_sync, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
)
return package_sync
class TestSingleRank(unittest.TestCase):
"""
All the following tests do not check for inter-process communication
"""
def setUp(self):
dist_init(0, 1, tempfile.mkstemp()[1])
def tearDown(self):
torch.distributed.destroy_process_group()
def test_create(self):
params = [torch.rand(1)]
o = optim.OSS(params, lr=0.01)
def test_state_dict(self):
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1, momentum=0.9)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
o.zero_grad()
o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none
state_dict = o.state_dict()
# Check that the state dict is pytorch-compliant key wise
assert "param_groups" in state_dict.keys()
assert "state" in state_dict.keys()
# Check that the pulled state is what we expect, and that we have all the expected keys
assert state_dict["param_groups"][0]["lr"] == 0.1
assert state_dict["param_groups"][0]["momentum"] == 0.9
assert not state_dict["param_groups"][0]["nesterov"]
assert state_dict["param_groups"][0]["weight_decay"] == 0.0
assert state_dict["param_groups"][0]["dampening"] == 0.0
# Check that the pulled state and the .param_groups attribute are in sync
for k in state_dict["param_groups"][0].keys():
if k != "params":
assert state_dict["param_groups"][0][k] == o.param_groups[0][k]
# Check that it's correctly loaded
o = optim.OSS([x], lr=0.01)
o.load_state_dict(state_dict)
# Check that state is correct and on proper device
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
# We should now be using a lr of 0.1, both within the optimizer
# and as exposed by the .param_groups attribute
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.71], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)
# Check that the exposed param_groups are on the proper device
assert o.param_groups[0]["params"][0].device == x.device
def test_lr_scheduler(self):
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
assert x == x2
def test_step_with_kwargs(self):
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=[]):
super().step()
kwarg.append(5)
kwarg = []
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
x.backward()
o.step(0, kwarg=kwarg)
assert kwarg == [5]
assert x == torch.tensor([0.9], device=DEVICE)
def test_step_with_extra_inner_key(self):
class SGDWithNewKey(torch.optim.SGD):
# Dummy optimizer which adds a new key to the param groups
def step(self, closure=None):
super().step()
self.param_groups[0]["new_key"] = 0.1
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithNewKey, lr=0.1)
x.backward()
o.step()
assert o.param_groups[0]["new_key"] == 0.1
assert x == torch.tensor([0.9], device=DEVICE)
def test_step_without_closure(self):
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def test_implicit_local_state_dict(self):
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1)
with pytest.raises(RuntimeError):
_ = o.state_dict()
def run_test_add_param_group(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name)
# Test with all parameters trainable to begin with
def all_trainable():
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * world_size
for size in sizes_world[:-1]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params:
p.requires_grad = True
o = optim.OSS(params, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
# Verify that added group is added to the correct partition making all have the same number of elements
assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == sum(sizes)
assert len(o.optim.param_groups) == 2
# Test a pathological config with a first big non-trainable param
def some_trainable():
params = []
for size in [100, 3, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params[1:]:
p.requires_grad = True
o = optim.OSS(params, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
assert len(o.optim.param_groups) == 2
all_trainable()
some_trainable()
dist.destroy_process_group()
def test_add_param_group():
world_size = 4
if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
world_size = min(world_size, torch.cuda.device_count())
mp.spawn(run_test_add_param_group, args=(world_size, tempfile.mkstemp()[1]), nprocs=world_size, join=True)
def run_test_zero_grad(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name)
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
assert m.weight.grad
assert m.bias.grad
o.zero_grad()
assert not m.weight.grad
assert not m.bias.grad
dist.destroy_process_group()
def test_zero_grad():
world_size = 2
if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
world_size = min(world_size, torch.cuda.device_count())
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(run_test_zero_grad, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_catch_empty_shardd(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
m = torch.nn.Linear(1, 1)
with pytest.raises(AssertionError):
_ = optim.OSS(m.parameters(), lr=0.1)
dist.destroy_process_group()
def test_empty_shard():
world_size = 4
mp.spawn(run_test_catch_empty_shardd, args=(world_size, tempfile.mkstemp()[1]), nprocs=world_size, join=True)
def run_test_step(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
x = torch.tensor([float(rank + 1)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
o.step()
assert m.weight == torch.tensor([[0.75]], device=rank)
assert m.bias == torch.tensor([1.85], device=rank)
dist.destroy_process_group()
@skip_if_single_gpu
def test_step():
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(run_test_step, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_step_with_closure(rank, world_size, tempfile_name, optimizer=None):
dist_init(rank, world_size, tempfile_name)
x_val = rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor([x_val * weight + bias + error], device=rank)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
assert loss == torch.tensor(error, device=rank)
assert m.weight == torch.tensor([[1.1]], device=rank)
assert m.bias == torch.tensor([2.1], device=rank)
dist.destroy_process_group()
@skip_if_no_cuda
def test_step_with_closure():
world_size = min(2, torch.cuda.device_count())
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(run_test_step_with_closure, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_sharding(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name)
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * world_size
for size in sizes_world:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params:
p.requires_grad = True
o = optim.OSS(params, lr=0.1)
assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == sum(sizes)
dist.destroy_process_group()
def test_sharding():
world_size = 4
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
_, temp_file_name = tempfile.mkstemp()
mp.spawn(run_test_sharding, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_collect_shards(rank, world_size, reference_rank, tempfile_name):
dist_init(rank, world_size, tempfile_name)
device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 3, 3, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Update the optimizer state on the reference rank
optimizer.consolidate_state_dict(recipient_rank=reference_rank)
# Fetch the state on the reference rank
# - check that it has the correct size
# - load it again
if rank == reference_rank:
optimizer_state_dict = optimizer.state_dict()
assert len(optimizer_state_dict["state"]) == len(list(model.parameters()))
else:
optimizer_state_dict = {}
# distribute to the other ranks
optimizer_state_dict = sync_object_ranks(optimizer_state_dict, reference_rank, device)
# Load the optimizer state dict
optimizer.load_state_dict(optimizer_state_dict)
dist.destroy_process_group()
def test_collect_shards():
world_size = 3
temp_file_name = tempfile.mkstemp()[1]
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
reference_rank = 0
mp.spawn(
run_test_collect_shards, args=(world_size, reference_rank, temp_file_name), nprocs=world_size, join=True,
)
def run_test_reproducibility(rank, world_size, reference_rank, tempfile_name):
dist_init(rank, world_size, tempfile_name)
device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 3, 3, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
optimizer = optim.OSS(model.parameters(), optim=torch.optim.RMSprop, lr=0.1)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Update the optimizer state on the reference rank
optimizer.consolidate_state_dict(recipient_rank=reference_rank)
# Fetch the state on the reference rank, broadcast to the other ones
if rank == reference_rank:
optimizer_state_dict = optimizer.state_dict()
else:
optimizer_state_dict = {}
# Run two steps, log the loss
_ = optimizer.step(closure=closure)
reference_loss = optimizer.step(closure=closure)
# Load the optimizer state dict, rewind the state two steps back
optimizer.load_state_dict(optimizer_state_dict)
# Run two new steps, log the loss again and check that we get the same
_ = optimizer.step(closure=closure)
test_loss = optimizer.step(closure=closure)
assert torch.allclose(reference_loss, test_loss)
dist.destroy_process_group()
def test_reproducibility():
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
# Bail out if not enough devices
return
reference_rank = 0
mp.spawn(
run_test_collect_shards, args=(world_size, reference_rank, temp_file_name), nprocs=world_size, join=True,
)
def run_test_multiple_groups(rank, world_size, tempfile_name):
# Only work with the even ranks, to check that the global_rank indexing is properly used
dist_init(rank=rank, world_size=world_size, tempfile_name=tempfile_name, backend="gloo")
sub_group_ranks = [0, 2, 4]
process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend="gloo")
# Make sure that all the ranks get different training data
# So that the sync check in between their models is meaningful
torch.manual_seed(rank)
np.random.seed(rank)
# Standard deep learning setup
device = "cpu"
epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
loss_fn = torch.nn.L1Loss().to(device)
def check(optimizer):
# Just run a couple of epochs, check that the model is properly updated
for _ in range(epochs):
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss /= world_size
loss.backward()
dist.all_reduce(loss, group=process_group) # Not strictly needed for the test below
return loss
_ = optimizer.step(closure=closure)
# Check that all the params are the same on all ranks
for pg in optimizer.param_groups:
for p in pg["params"]:
receptacle = [p.clone() for _ in sub_group_ranks] if rank == 0 else []
dist.gather(p, receptacle, dst=0, group=process_group)
if rank == 0:
for sync_p in receptacle[1:]:
assert torch.all(
torch.eq(receptacle[0], sync_p)
), "Models differ in between ranks {} - {}".format(
torch.norm(receptacle[0]), torch.norm(sync_p)
)
if rank in sub_group_ranks:
# Model fitting in the broadcast bucket
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
device
)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(
model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=2 ** 20
)
check(optimizer)
# Model not-fitting in the broadcast bucket
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
device
)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=0)
check(optimizer)
dist.destroy_process_group(process_group)
@skip_if_py39_no_cuda
def test_multiple_groups():
world_size = 6
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(
run_test_multiple_groups, args=(world_size, temp_file_name), nprocs=world_size, join=True,
)
def run_gradient_clipping(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
device = torch.device(rank)
torch.manual_seed(rank) # make sure that the different rank get different data
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
NORMS = [1.0, 2.0, 1, 2, inf]
CLIP_NORM = 0.3
def check(norm):
model_oss = torch.nn.Sequential(
torch.nn.Linear(input_width, hidden),
torch.nn.Linear(hidden, hidden),
torch.nn.Linear(hidden, target_width),
).to(device)
model = copy.deepcopy(model_oss)
# For this test the gradients are (all) reduced in the same way in between the torch reference and fairscale.
# Normally OSS would use ShardedDDP and only reduce to the proper rank, but this does not change the
# gradient norm computation from OSS and adds a dependency.
# to keep the comparison apples-to-apples DDP is used in both cases
model_oss = DDP(module=model_oss, device_ids=[rank],)
sharded_optimizer = optim.OSS(model_oss.parameters(), lr=0.1, momentum=0.99)
model = DDP(model, device_ids=[rank],)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
model.zero_grad()
model_oss.zero_grad()
outputs = model(inputs)
outputs_oss = model_oss(inputs)
loss = loss_fn(outputs, target)
loss.backward()
loss_oss = loss_fn(outputs_oss, target)
loss_oss.backward()
torch.testing.assert_allclose(loss_oss, loss)
# Check the equivalence with the non-sharded optim
oss_total_norm = sharded_optimizer.clip_grad_norm(CLIP_NORM, norm_type=norm)
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_NORM, norm_type=norm)
assert torch.allclose(oss_total_norm, total_norm), "torch and fairscale should return the same grad norm"
# Check that the params have indeed been clipped
for params in sharded_optimizer.per_device_params.values():
for param in filter(lambda x: x.grad is not None, params[rank]):
assert torch.norm(param.grad, p=norm) < CLIP_NORM, f"param grad norm above clip : {param.grad}"
for norm in NORMS:
print(f"Checking norm {norm}")
check(norm)
# Check twice, catch an hypothetic iterator dumb mistake
check(norm)
dist.destroy_process_group()
@skip_if_no_cuda
def test_gradient_clipping():
world_size = 3
temp_file_name = tempfile.mkstemp()[1]
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
reference_rank = 0
mp.spawn(
run_gradient_clipping, args=(world_size, temp_file_name), nprocs=world_size, join=True,
)
def run_state_dict_distributed(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
device = torch.device(rank)
torch.manual_seed(rank) # make sure that the different rank get different data
# Setup two problems in parallel, we'll make sure that the second track (with save/load) follows the first one(untouched)
# We split the model in two to test the multiple param groups support
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model_oss1 = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, hidden)).to(device)
head_oss1 = torch.nn.Linear(hidden, target_width).to(device)
model_oss2 = copy.deepcopy(model_oss1)
head_oss2 = copy.deepcopy(head_oss1)
# For this test the gradients are (all) reduced in the same way in between the torch reference and fairscale.
# Normally OSS would use ShardedDDP and only reduce to the proper rank, but this does not change the
# gradient norm computation from OSS and adds a dependency.
# to keep the comparison apples-to-apples DDP is used in both cases
model_oss1 = DDP(module=model_oss1, device_ids=[rank],)
sharded_optimizer1 = optim.OSS(model_oss1.parameters(), lr=0.1, momentum=0.99)
sharded_optimizer1.add_param_group({"params": head_oss1.parameters()})
model_oss2 = DDP(module=model_oss2, device_ids=[rank],)
sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
loss_fn = torch.nn.L1Loss().to(device)
def run_grad_step(model, head, optimizer):
model.zero_grad()
outputs = head(model(inputs))
# pull the current state, broadcast it to all ranks
sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK) # all ranks
state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)
# re-create a new optimizer from scratch with absurd values, load the previous state
sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=1e6, momentum=0.0001)
sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
sharded_optimizer2.load_state_dict(state_dict2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (before any steps)"
)
# now take a step and check that parameters are equal
run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (after stepping)"
)
# save the state dict for one model only, then distribute to the other ranks
sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK) # all ranks
state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)
# Check that the pulled state and the .param_groups attribute are in sync
for replica in range(len(state_dict2["param_groups"])):
for k in state_dict2["param_groups"][replica].keys():
if k != "params":
assert state_dict2["param_groups"][replica][k] == sharded_optimizer2.param_groups[0][k]
# take a step
run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (after consolidating)"
)
# save again for one rank, then distribute to the others
sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK) # all ranks
state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)
# reload the state_dict
sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
sharded_optimizer2.load_state_dict(state_dict2)
# take a step
run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (after reloading)"
)
dist.destroy_process_group()
@skip_if_no_cuda
def test_state_dict_distributed():
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
if torch.cuda.is_available():
world_size = max(world_size, torch.cuda.device_count())
mp.spawn(
run_state_dict_distributed, args=(world_size, temp_file_name), nprocs=world_size, join=True,
)
def run_ddp_parity(rank, world_size, backend, temp_file_name):
url = "file://" + temp_file_name
dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
device = torch.device("cuda")
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
hidden = 5
in_channels = 3
out_channels = 3
batch = 64
def check_optimizer_equivalence(optimizer: Type[torch.optim.Optimizer], change_train_graph: bool = False):
# Any model works. Add one different buffer per rank
trunk = torch.nn.Sequential(
torch.nn.Linear(in_channels, hidden), torch.nn.Linear(hidden, hidden), torch.nn.Linear(hidden, hidden)
)
trunk.register_buffer("test_buffer", torch.ones((1)) * rank)
trunk.to(device)
head = torch.nn.Linear(hidden, out_channels).to(device)
# Define a model to be trained by OSS
oss_module = torch.nn.Sequential(trunk, head)
oss_trainable_params = [
{"params": trunk.parameters(), "lr": 1e-5},
{"params": head.parameters(), "lr": 1e-4},
]
optimizer_settings: Dict[Any, Any] = {}
if isinstance(optimizer, torch.optim.SGD):
optimizer_settings["momentum"] = 0.9
sharded_optimizer = optim.OSS(
params=oss_trainable_params,
optim=optimizer,
group=None,
broadcast_buffer_size=2 ** 10,
**optimizer_settings,
)
oss_ddp_model = DDP(module=oss_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)
# Define a model to be trained by normal pytorch + DDP
ddp_trunk = copy.deepcopy(trunk)
ddp_head = copy.deepcopy(head)
ddp_module = torch.nn.Sequential(ddp_trunk, ddp_head)
ddp_trainable_params = [
{"params": ddp_trunk.parameters(), "lr": 1e-5},
{"params": ddp_head.parameters(), "lr": 1e-4},
]
ddp_optimizer = optimizer(ddp_trainable_params, **optimizer_settings) # type: ignore
ddp_model = DDP(module=ddp_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)
def check_step():
input_tensor = torch.rand((batch, in_channels)).to(device)
def closure_ddp(input_tensor=input_tensor):
ddp_optimizer.zero_grad()
ddp_loss = ddp_model(input_tensor).abs().sum()
ddp_loss.backward()
return ddp_loss
def closure_sharded(input_tensor=input_tensor):
sharded_optimizer.zero_grad()
sharded_loss = oss_ddp_model(input_tensor).abs().sum()
sharded_loss.backward()
return sharded_loss
loss_ddp = cast(torch.Tensor, ddp_optimizer.step(closure=closure_ddp))
loss_sharded_optim = cast(torch.Tensor, sharded_optimizer.step(closure=closure_sharded))
assert torch.allclose(
loss_ddp, loss_sharded_optim, rtol=1e-3
), f"Losses differ in between Pytorch optim and OSS\n {loss_ddp.item()} - {loss_sharded_optim.item()} - world size {world_size}"
check_same_model_params(oss_ddp_model, ddp_model)
# The model should be synchronized in between the ranks at construction time, check that
check_same_model_params(oss_ddp_model, ddp_model)
# The models should stay the same in between ddp and sharded optimizer
for i in range(5):
check_step()
# Check that altering the trainable parameters does not cause DDP and OSS to diverge
if change_train_graph:
# Flip the first parameter from trainable to non-trainable and vice-versa
next(ddp_module.parameters()).requires_grad = not next(ddp_module.parameters()).requires_grad
next(oss_module.parameters()).requires_grad = not next(oss_module.parameters()).requires_grad
# sharded_optimizer.refresh_trainable()
# Check that the checkpoints are compatible
# - get states
ddp_state_dict = ddp_optimizer.state_dict()
sharded_optimizer.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)
sharded_optim_state_dict = sharded_optimizer.state_dict() if rank == RECIPIENT_RANK else {}
sharded_optim_state_dict = sync_object_ranks(sharded_optim_state_dict, RECIPIENT_RANK, device)
# - cross load the states
# run one step and check that the models are still the same
ddp_state_dict_ref = copy.deepcopy(ddp_state_dict) # OSS will remove some states
ddp_optimizer.load_state_dict(sharded_optim_state_dict) # mixup on purpose !
sharded_optimizer.load_state_dict(ddp_state_dict)
check_step()
# - self load, rewind, check no problem
# run one step and check that the models are still the same
ddp_optimizer.load_state_dict(ddp_state_dict_ref)
sharded_optimizer.load_state_dict(sharded_optim_state_dict)
check_step()
for opt in [torch.optim.Adam, torch.optim.SGD]:
check_optimizer_equivalence(opt, change_train_graph=False)
check_optimizer_equivalence(opt, change_train_graph=True)
dist.destroy_process_group()
@skip_if_no_cuda
@skip_if_single_gpu
def test_ddp_parity():
temp_file_name = tempfile.mkstemp()[1]
world_size = torch.cuda.device_count()
backend = dist.Backend.NCCL
mp.spawn(run_ddp_parity, args=(world_size, backend, temp_file_name), nprocs=world_size, join=True)
| [
"numpy.random.seed",
"torch.optim.lr_scheduler.StepLR",
"torch.testing.assert_allclose",
"torch.cuda.device_count",
"torch.device",
"torch.distributed.gather",
"torch.ones",
"torch.nn.parallel.DistributedDataParallel",
"pytest.raises",
"fairscale.optim.OSS",
"torch.nn.Linear",
"torch.cuda.set_... | [((797, 822), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (820, 822), False, 'import torch\n'), ((881, 906), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (904, 906), False, 'import torch\n'), ((912, 931), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (924, 931), False, 'import torch\n'), ((1284, 1379), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'init_method': 'url', 'backend': 'backend', 'rank': 'rank', 'world_size': 'world_size'}), '(init_method=url, backend=backend, rank=rank,\n world_size=world_size)\n', (1307, 1379), True, 'import torch.distributed as dist\n'), ((8106, 8134), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (8132, 8134), True, 'import torch.distributed as dist\n'), ((8551, 8564), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (8561, 8564), False, 'import torch\n'), ((8573, 8594), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (8588, 8594), False, 'import torch\n'), ((8795, 8823), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (8821, 8823), True, 'import torch.distributed as dist\n'), ((9056, 9154), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_zero_grad'], {'args': '(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_zero_grad, args=(world_size, temp_file_name), nprocs=\n world_size, join=True)\n', (9064, 9154), True, 'import torch.multiprocessing as mp\n'), ((9289, 9310), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (9304, 9310), False, 'import torch\n'), ((9402, 9430), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (9428, 9430), True, 'import torch.distributed as dist\n'), ((9769, 9790), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (9784, 9790), False, 'import 
torch\n'), ((9811, 9832), 'torch.tensor', 'torch.tensor', (['[[1.0]]'], {}), '([[1.0]])\n', (9823, 9832), False, 'import torch\n'), ((9851, 9870), 'torch.tensor', 'torch.tensor', (['[2.0]'], {}), '([2.0])\n', (9863, 9870), False, 'import torch\n'), ((10213, 10241), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (10239, 10241), True, 'import torch.distributed as dist\n'), ((10348, 10441), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_step'], {'args': '(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_step, args=(world_size, temp_file_name), nprocs=\n world_size, join=True)\n', (10356, 10441), True, 'import torch.multiprocessing as mp\n'), ((10650, 10708), 'torch.tensor', 'torch.tensor', (['[x_val * weight + bias + error]'], {'device': 'rank'}), '([x_val * weight + bias + error], device=rank)\n', (10662, 10708), False, 'import torch\n'), ((10723, 10740), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (10738, 10740), False, 'import torch\n'), ((10800, 10821), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (10815, 10821), False, 'import torch\n'), ((10842, 10866), 'torch.tensor', 'torch.tensor', (['[[weight]]'], {}), '([[weight]])\n', (10854, 10866), False, 'import torch\n'), ((10885, 10905), 'torch.tensor', 'torch.tensor', (['[bias]'], {}), '([bias])\n', (10897, 10905), False, 'import torch\n'), ((11471, 11499), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (11497, 11499), True, 'import torch.distributed as dist\n'), ((11648, 11753), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_step_with_closure'], {'args': '(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_step_with_closure, args=(world_size, temp_file_name),\n nprocs=world_size, join=True)\n', (11656, 11753), True, 'import torch.multiprocessing as mp\n'), ((12148, 12173), 'fairscale.optim.OSS', 
'optim.OSS', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (12157, 12173), True, 'import fairscale.optim as optim\n'), ((12264, 12292), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (12290, 12292), True, 'import torch.distributed as dist\n'), ((12342, 12367), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12365, 12367), False, 'import torch\n'), ((12458, 12476), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (12474, 12476), False, 'import tempfile\n'), ((12481, 12578), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_sharding'], {'args': '(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_sharding, args=(world_size, temp_file_name), nprocs=\n world_size, join=True)\n', (12489, 12578), True, 'import torch.multiprocessing as mp\n'), ((12913, 12961), 'torch.rand', 'torch.rand', (['(batch, target_width)'], {'device': 'device'}), '((batch, target_width), device=device)\n', (12923, 12961), False, 'import torch\n'), ((12975, 13022), 'torch.rand', 'torch.rand', (['(batch, input_width)'], {'device': 'device'}), '((batch, input_width), device=device)\n', (12985, 13022), False, 'import torch\n'), ((13169, 13186), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (13184, 13186), False, 'import torch\n'), ((14213, 14241), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (14239, 14241), True, 'import torch.distributed as dist\n'), ((14341, 14366), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14364, 14366), False, 'import torch\n'), ((14460, 14578), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_collect_shards'], {'args': '(world_size, reference_rank, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_collect_shards, args=(world_size, reference_rank,\n temp_file_name), nprocs=world_size, join=True)\n', (14468, 14578), True, 'import 
torch.multiprocessing as mp\n'), ((14930, 14978), 'torch.rand', 'torch.rand', (['(batch, target_width)'], {'device': 'device'}), '((batch, target_width), device=device)\n', (14940, 14978), False, 'import torch\n'), ((14992, 15039), 'torch.rand', 'torch.rand', (['(batch, input_width)'], {'device': 'device'}), '((batch, input_width), device=device)\n', (15002, 15039), False, 'import torch\n'), ((15186, 15203), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (15201, 15203), False, 'import torch\n'), ((16267, 16308), 'torch.allclose', 'torch.allclose', (['reference_loss', 'test_loss'], {}), '(reference_loss, test_loss)\n', (16281, 16308), False, 'import torch\n'), ((16314, 16342), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (16340, 16342), True, 'import torch.distributed as dist\n'), ((16598, 16716), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_collect_shards'], {'args': '(world_size, reference_rank, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_collect_shards, args=(world_size, reference_rank,\n temp_file_name), nprocs=world_size, join=True)\n', (16606, 16716), True, 'import torch.multiprocessing as mp\n'), ((17031, 17097), 'torch.distributed.new_group', 'torch.distributed.new_group', ([], {'ranks': 'sub_group_ranks', 'backend': '"""gloo"""'}), "(ranks=sub_group_ranks, backend='gloo')\n", (17058, 17097), False, 'import torch\n'), ((17233, 17256), 'torch.manual_seed', 'torch.manual_seed', (['rank'], {}), '(rank)\n', (17250, 17256), False, 'import torch\n'), ((17261, 17281), 'numpy.random.seed', 'np.random.seed', (['rank'], {}), '(rank)\n', (17275, 17281), True, 'import numpy as np\n'), ((19710, 19751), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', (['process_group'], {}), '(process_group)\n', (19736, 19751), True, 'import torch.distributed as dist\n'), ((19871, 19974), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_test_multiple_groups'], {'args': 
'(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_test_multiple_groups, args=(world_size, temp_file_name),\n nprocs=world_size, join=True)\n', (19879, 19974), True, 'import torch.multiprocessing as mp\n'), ((20124, 20142), 'torch.device', 'torch.device', (['rank'], {}), '(rank)\n', (20136, 20142), False, 'import torch\n'), ((20147, 20170), 'torch.manual_seed', 'torch.manual_seed', (['rank'], {}), '(rank)\n', (20164, 20170), False, 'import torch\n'), ((20364, 20412), 'torch.rand', 'torch.rand', (['(batch, target_width)'], {'device': 'device'}), '((batch, target_width), device=device)\n', (20374, 20412), False, 'import torch\n'), ((20426, 20473), 'torch.rand', 'torch.rand', (['(batch, input_width)'], {'device': 'device'}), '((batch, input_width), device=device)\n', (20436, 20473), False, 'import torch\n'), ((22603, 22631), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (22629, 22631), True, 'import torch.distributed as dist\n'), ((22751, 22776), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (22774, 22776), False, 'import torch\n'), ((22870, 22971), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_gradient_clipping'], {'args': '(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_gradient_clipping, args=(world_size, temp_file_name), nprocs=\n world_size, join=True)\n', (22878, 22971), True, 'import torch.multiprocessing as mp\n'), ((23126, 23144), 'torch.device', 'torch.device', (['rank'], {}), '(rank)\n', (23138, 23144), False, 'import torch\n'), ((23149, 23172), 'torch.manual_seed', 'torch.manual_seed', (['rank'], {}), '(rank)\n', (23166, 23172), False, 'import torch\n'), ((23503, 23551), 'torch.rand', 'torch.rand', (['(batch, target_width)'], {'device': 'device'}), '((batch, target_width), device=device)\n', (23513, 23551), False, 'import torch\n'), ((23565, 23612), 'torch.rand', 'torch.rand', (['(batch, input_width)'], {'device': 
'device'}), '((batch, input_width), device=device)\n', (23575, 23612), False, 'import torch\n'), ((23816, 23841), 'copy.deepcopy', 'copy.deepcopy', (['model_oss1'], {}), '(model_oss1)\n', (23829, 23841), False, 'import copy\n'), ((23858, 23882), 'copy.deepcopy', 'copy.deepcopy', (['head_oss1'], {}), '(head_oss1)\n', (23871, 23882), False, 'import copy\n'), ((24256, 24297), 'torch.nn.parallel.DistributedDataParallel', 'DDP', ([], {'module': 'model_oss1', 'device_ids': '[rank]'}), '(module=model_oss1, device_ids=[rank])\n', (24259, 24297), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((24475, 24516), 'torch.nn.parallel.DistributedDataParallel', 'DDP', ([], {'module': 'model_oss2', 'device_ids': '[rank]'}), '(module=model_oss2, device_ids=[rank])\n', (24478, 24516), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((25442, 25568), 'fairscale.utils.testing.check_same_model_params', 'check_same_model_params', (['model_oss1', 'model_oss2', '"""parameters of the two identical models have diverged (before any steps)"""'], {}), "(model_oss1, model_oss2,\n 'parameters of the two identical models have diverged (before any steps)')\n", (25465, 25568), False, 'from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu\n'), ((25764, 25888), 'fairscale.utils.testing.check_same_model_params', 'check_same_model_params', (['model_oss1', 'model_oss2', '"""parameters of the two identical models have diverged (after stepping)"""'], {}), "(model_oss1, model_oss2,\n 'parameters of the two identical models have diverged (after stepping)')\n", (25787, 25888), False, 'from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu\n'), ((26708, 26842), 'fairscale.utils.testing.check_same_model_params', 'check_same_model_params', (['model_oss1', 'model_oss2', '"""parameters of the two identical models have diverged (after 
consolidating)"""'], {}), "(model_oss1, model_oss2,\n 'parameters of the two identical models have diverged (after consolidating)'\n )\n", (26731, 26842), False, 'from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu\n'), ((27541, 27666), 'fairscale.utils.testing.check_same_model_params', 'check_same_model_params', (['model_oss1', 'model_oss2', '"""parameters of the two identical models have diverged (after reloading)"""'], {}), "(model_oss1, model_oss2,\n 'parameters of the two identical models have diverged (after reloading)')\n", (27564, 27666), False, 'from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu\n'), ((27682, 27710), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (27708, 27710), True, 'import torch.distributed as dist\n'), ((27835, 27860), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27858, 27860), False, 'import torch\n'), ((27931, 28036), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_state_dict_distributed'], {'args': '(world_size, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_state_dict_distributed, args=(world_size, temp_file_name),\n nprocs=world_size, join=True)\n', (27939, 28036), True, 'import torch.multiprocessing as mp\n'), ((28154, 28249), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'init_method': 'url', 'backend': 'backend', 'rank': 'rank', 'world_size': 'world_size'}), '(init_method=url, backend=backend, rank=rank,\n world_size=world_size)\n', (28177, 28249), True, 'import torch.distributed as dist\n'), ((28260, 28280), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (28272, 28280), False, 'import torch\n'), ((28285, 28312), 'torch.cuda.set_device', 'torch.cuda.set_device', (['rank'], {}), '(rank)\n', (28306, 28312), False, 'import torch\n'), ((28317, 28340), 
'torch.manual_seed', 'torch.manual_seed', (['rank'], {}), '(rank)\n', (28334, 28340), False, 'import torch\n'), ((28345, 28365), 'numpy.random.seed', 'np.random.seed', (['rank'], {}), '(rank)\n', (28359, 28365), True, 'import numpy as np\n'), ((33329, 33357), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (33355, 33357), True, 'import torch.distributed as dist\n'), ((33480, 33505), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (33503, 33505), False, 'import torch\n'), ((33542, 33645), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_ddp_parity'], {'args': '(world_size, backend, temp_file_name)', 'nprocs': 'world_size', 'join': '(True)'}), '(run_ddp_parity, args=(world_size, backend, temp_file_name), nprocs\n =world_size, join=True)\n', (33550, 33645), True, 'import torch.multiprocessing as mp\n'), ((1553, 1632), 'torch.distributed.broadcast_object_list', 'dist.broadcast_object_list', (['package'], {'src': 'reference_rank', 'group': 'dist.group.WORLD'}), '(package, src=reference_rank, group=dist.group.WORLD)\n', (1579, 1632), True, 'import torch.distributed as dist\n'), ((1700, 1820), 'fairscale.optim.utils.broadcast_object', 'optim.utils.broadcast_object', (['something_to_sync'], {'src_rank': 'reference_rank', 'group': 'dist.group.WORLD', 'dist_device': 'device'}), '(something_to_sync, src_rank=reference_rank,\n group=dist.group.WORLD, dist_device=device)\n', (1728, 1820), True, 'import fairscale.optim as optim\n'), ((2098, 2139), 'torch.distributed.destroy_process_group', 'torch.distributed.destroy_process_group', ([], {}), '()\n', (2137, 2139), False, 'import torch\n'), ((2213, 2239), 'fairscale.optim.OSS', 'optim.OSS', (['params'], {'lr': '(0.01)'}), '(params, lr=0.01)\n', (2222, 2239), True, 'import fairscale.optim as optim\n'), ((2284, 2338), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (2296, 
2338), False, 'import torch\n'), ((2351, 2387), 'fairscale.optim.OSS', 'optim.OSS', (['[x]'], {'lr': '(0.1)', 'momentum': '(0.9)'}), '([x], lr=0.1, momentum=0.9)\n', (2360, 2387), True, 'import fairscale.optim as optim\n'), ((3606, 3629), 'fairscale.optim.OSS', 'optim.OSS', (['[x]'], {'lr': '(0.01)'}), '([x], lr=0.01)\n', (3615, 3629), True, 'import fairscale.optim as optim\n'), ((4357, 4411), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (4369, 4411), False, 'import torch\n'), ((4425, 4479), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (4437, 4479), False, 'import torch\n'), ((4492, 4515), 'fairscale.optim.OSS', 'optim.OSS', (['[x]'], {'lr': '(0.01)'}), '([x], lr=0.01)\n', (4501, 4515), True, 'import fairscale.optim as optim\n'), ((4529, 4559), 'torch.optim.SGD', 'torch.optim.SGD', (['[x2]'], {'lr': '(0.01)'}), '([x2], lr=0.01)\n', (4544, 4559), False, 'import torch\n'), ((4572, 4609), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['o', '(1)'], {}), '(o, 1)\n', (4603, 4609), False, 'import torch\n'), ((4623, 4661), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['o2', '(1)'], {}), '(o2, 1)\n', (4654, 4661), False, 'import torch\n'), ((5140, 5194), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (5152, 5194), False, 'import torch\n'), ((5207, 5247), 'fairscale.optim.OSS', 'optim.OSS', (['[x]', 'SGDWithStepKWArg'], {'lr': '(0.1)'}), '([x], SGDWithStepKWArg, lr=0.1)\n', (5216, 5247), True, 'import fairscale.optim as optim\n'), ((5687, 5741), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (5699, 5741), False, 'import torch\n'), ((5754, 5791), 
'fairscale.optim.OSS', 'optim.OSS', (['[x]', 'SGDWithNewKey'], {'lr': '(0.1)'}), '([x], SGDWithNewKey, lr=0.1)\n', (5763, 5791), True, 'import fairscale.optim as optim\n'), ((6107, 6161), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (6119, 6161), False, 'import torch\n'), ((6174, 6215), 'fairscale.optim.OSS', 'optim.OSS', (['[x]', 'SGDWithoutClosure'], {'lr': '(0.1)'}), '([x], SGDWithoutClosure, lr=0.1)\n', (6183, 6215), True, 'import fairscale.optim as optim\n'), ((6368, 6422), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE', 'requires_grad': '(True)'}), '([1.0], device=DEVICE, requires_grad=True)\n', (6380, 6422), False, 'import torch\n'), ((6435, 6457), 'fairscale.optim.OSS', 'optim.OSS', (['[x]'], {'lr': '(0.1)'}), '([x], lr=0.1)\n', (6444, 6457), True, 'import fairscale.optim as optim\n'), ((7057, 7082), 'fairscale.optim.OSS', 'optim.OSS', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (7066, 7082), True, 'import fairscale.optim as optim\n'), ((7847, 7872), 'fairscale.optim.OSS', 'optim.OSS', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (7856, 7872), True, 'import fairscale.optim as optim\n'), ((8191, 8216), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8214, 8216), False, 'import torch\n'), ((8874, 8899), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8897, 8899), False, 'import torch\n'), ((9030, 9048), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (9046, 9048), False, 'import tempfile\n'), ((9320, 9349), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (9333, 9349), False, 'import pytest\n'), ((9996, 10046), 'torch.distributed.all_reduce', 'dist.all_reduce', (['p.grad.data'], {'op': 'dist.ReduceOp.SUM'}), '(p.grad.data, op=dist.ReduceOp.SUM)\n', (10011, 10046), True, 'import torch.distributed as dist\n'), ((10117, 10152), 
'torch.tensor', 'torch.tensor', (['[[0.75]]'], {'device': 'rank'}), '([[0.75]], device=rank)\n', (10129, 10152), False, 'import torch\n'), ((10174, 10207), 'torch.tensor', 'torch.tensor', (['[1.85]'], {'device': 'rank'}), '([1.85], device=rank)\n', (10186, 10207), False, 'import torch\n'), ((10321, 10339), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (10337, 10339), False, 'import tempfile\n'), ((11033, 11083), 'torch.distributed.all_reduce', 'dist.all_reduce', (['p.grad.data'], {'op': 'dist.ReduceOp.SUM'}), '(p.grad.data, op=dist.ReduceOp.SUM)\n', (11048, 11083), True, 'import torch.distributed as dist\n'), ((11321, 11353), 'torch.tensor', 'torch.tensor', (['error'], {'device': 'rank'}), '(error, device=rank)\n', (11333, 11353), False, 'import torch\n'), ((11377, 11411), 'torch.tensor', 'torch.tensor', (['[[1.1]]'], {'device': 'rank'}), '([[1.1]], device=rank)\n', (11389, 11411), False, 'import torch\n'), ((11433, 11465), 'torch.tensor', 'torch.tensor', (['[2.1]'], {'device': 'rank'}), '([2.1], device=rank)\n', (11445, 11465), False, 'import torch\n'), ((11573, 11598), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (11596, 11598), False, 'import torch\n'), ((11621, 11639), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (11637, 11639), False, 'import tempfile\n'), ((12714, 12732), 'torch.device', 'torch.device', (['rank'], {}), '(rank)\n', (12726, 12732), False, 'import torch\n'), ((13056, 13092), 'torch.nn.Linear', 'torch.nn.Linear', (['input_width', 'hidden'], {}), '(input_width, hidden)\n', (13071, 13092), False, 'import torch\n'), ((13094, 13131), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'target_width'], {}), '(hidden, target_width)\n', (13109, 13131), False, 'import torch\n'), ((14311, 14329), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (14327, 14329), False, 'import tempfile\n'), ((14731, 14749), 'torch.device', 'torch.device', (['rank'], {}), '(rank)\n', (14743, 14749), False, 'import 
torch\n'), ((15073, 15109), 'torch.nn.Linear', 'torch.nn.Linear', (['input_width', 'hidden'], {}), '(input_width, hidden)\n', (15088, 15109), False, 'import torch\n'), ((15111, 15148), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'target_width'], {}), '(hidden, target_width)\n', (15126, 15148), False, 'import torch\n'), ((16413, 16431), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (16429, 16431), False, 'import tempfile\n'), ((16443, 16468), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16466, 16468), False, 'import torch\n'), ((19844, 19862), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (19860, 19862), False, 'import tempfile\n'), ((20774, 20798), 'copy.deepcopy', 'copy.deepcopy', (['model_oss'], {}), '(model_oss)\n', (20787, 20798), False, 'import copy\n'), ((21191, 21231), 'torch.nn.parallel.DistributedDataParallel', 'DDP', ([], {'module': 'model_oss', 'device_ids': '[rank]'}), '(module=model_oss, device_ids=[rank])\n', (21194, 21231), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((21335, 21364), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {'device_ids': '[rank]'}), '(model, device_ids=[rank])\n', (21338, 21364), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((21385, 21402), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (21400, 21402), False, 'import torch\n'), ((21710, 21755), 'torch.testing.assert_allclose', 'torch.testing.assert_allclose', (['loss_oss', 'loss'], {}), '(loss_oss, loss)\n', (21739, 21755), False, 'import torch\n'), ((22015, 22057), 'torch.allclose', 'torch.allclose', (['oss_total_norm', 'total_norm'], {}), '(oss_total_norm, total_norm)\n', (22029, 22057), False, 'import torch\n'), ((22721, 22739), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (22737, 22739), False, 'import tempfile\n'), ((27805, 27823), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (27821, 27823), False, 'import 
tempfile\n'), ((28999, 29031), 'torch.nn.Sequential', 'torch.nn.Sequential', (['trunk', 'head'], {}), '(trunk, head)\n', (29018, 29031), False, 'import torch\n'), ((29364, 29488), 'fairscale.optim.OSS', 'optim.OSS', ([], {'params': 'oss_trainable_params', 'optim': 'optimizer', 'group': 'None', 'broadcast_buffer_size': '(2 ** 10)'}), '(params=oss_trainable_params, optim=optimizer, group=None,\n broadcast_buffer_size=2 ** 10, **optimizer_settings)\n', (29373, 29488), True, 'import fairscale.optim as optim\n'), ((29581, 29679), 'torch.nn.parallel.DistributedDataParallel', 'DDP', ([], {'module': 'oss_module', 'device_ids': '[rank]', 'broadcast_buffers': '(True)', 'find_unused_parameters': '(True)'}), '(module=oss_module, device_ids=[rank], broadcast_buffers=True,\n find_unused_parameters=True)\n', (29584, 29679), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((29760, 29780), 'copy.deepcopy', 'copy.deepcopy', (['trunk'], {}), '(trunk)\n', (29773, 29780), False, 'import copy\n'), ((29800, 29819), 'copy.deepcopy', 'copy.deepcopy', (['head'], {}), '(head)\n', (29813, 29819), False, 'import copy\n'), ((29841, 29881), 'torch.nn.Sequential', 'torch.nn.Sequential', (['ddp_trunk', 'ddp_head'], {}), '(ddp_trunk, ddp_head)\n', (29860, 29881), False, 'import torch\n'), ((30159, 30257), 'torch.nn.parallel.DistributedDataParallel', 'DDP', ([], {'module': 'ddp_module', 'device_ids': '[rank]', 'broadcast_buffers': '(True)', 'find_unused_parameters': '(True)'}), '(module=ddp_module, device_ids=[rank], broadcast_buffers=True,\n find_unused_parameters=True)\n', (30162, 30257), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((31423, 31472), 'fairscale.utils.testing.check_same_model_params', 'check_same_model_params', (['oss_ddp_model', 'ddp_model'], {}), '(oss_ddp_model, ddp_model)\n', (31446, 31472), False, 'from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu\n'), 
((32647, 32676), 'copy.deepcopy', 'copy.deepcopy', (['ddp_state_dict'], {}), '(ddp_state_dict)\n', (32660, 32676), False, 'import copy\n'), ((33441, 33459), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (33457, 33459), False, 'import tempfile\n'), ((2186, 2199), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2196, 2199), False, 'import torch\n'), ((2446, 2480), 'torch.tensor', 'torch.tensor', (['[0.9]'], {'device': 'DEVICE'}), '([0.9], device=DEVICE)\n', (2458, 2480), False, 'import torch\n'), ((2535, 2569), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE'}), '([1.0], device=DEVICE)\n', (2547, 2569), False, 'import torch\n'), ((3781, 3815), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'device': 'DEVICE'}), '([1.0], device=DEVICE)\n', (3793, 3815), False, 'import torch\n'), ((4049, 4084), 'torch.tensor', 'torch.tensor', (['[0.71]'], {'device': 'DEVICE'}), '([0.71], device=DEVICE)\n', (4061, 4084), False, 'import torch\n'), ((4139, 4173), 'torch.tensor', 'torch.tensor', (['[1.9]'], {'device': 'DEVICE'}), '([1.9], device=DEVICE)\n', (4151, 4173), False, 'import torch\n'), ((5348, 5382), 'torch.tensor', 'torch.tensor', (['[0.9]'], {'device': 'DEVICE'}), '([0.9], device=DEVICE)\n', (5360, 5382), False, 'import torch\n'), ((5901, 5935), 'torch.tensor', 'torch.tensor', (['[0.9]'], {'device': 'DEVICE'}), '([0.9], device=DEVICE)\n', (5913, 5935), False, 'import torch\n'), ((6274, 6308), 'torch.tensor', 'torch.tensor', (['[0.9]'], {'device': 'DEVICE'}), '([0.9], device=DEVICE)\n', (6286, 6308), False, 'import torch\n'), ((6471, 6498), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6484, 6498), False, 'import pytest\n'), ((8221, 8246), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8244, 8246), False, 'import torch\n'), ((8298, 8323), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8321, 8323), False, 'import torch\n'), ((8904, 8929), 'torch.cuda.device_count', 
'torch.cuda.device_count', ([], {}), '()\n', (8927, 8929), False, 'import torch\n'), ((8981, 9006), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9004, 9006), False, 'import torch\n'), ((11985, 12004), 'torch.rand', 'torch.rand', (['size', '(1)'], {}), '(size, 1)\n', (11995, 12004), False, 'import torch\n'), ((12406, 12431), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (12429, 12431), False, 'import torch\n'), ((12736, 12761), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (12759, 12761), False, 'import torch\n'), ((14405, 14430), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14428, 14430), False, 'import torch\n'), ((14753, 14778), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14776, 14778), False, 'import torch\n'), ((16473, 16498), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (16496, 16498), False, 'import torch\n'), ((17422, 17439), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (17437, 17439), False, 'import torch\n'), ((17611, 17659), 'torch.rand', 'torch.rand', (['(batch, target_width)'], {'device': 'device'}), '((batch, target_width), device=device)\n', (17621, 17659), False, 'import torch\n'), ((17681, 17728), 'torch.rand', 'torch.rand', (['(batch, input_width)'], {'device': 'device'}), '((batch, input_width), device=device)\n', (17691, 17728), False, 'import torch\n'), ((22815, 22840), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (22838, 22840), False, 'import torch\n'), ((23749, 23786), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'target_width'], {}), '(hidden, target_width)\n', (23764, 23786), False, 'import torch\n'), ((24691, 24708), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (24706, 24708), False, 'import torch\n'), ((27899, 27924), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (27922, 27924), False, 'import 
torch\n'), ((28659, 28695), 'torch.nn.Linear', 'torch.nn.Linear', (['in_channels', 'hidden'], {}), '(in_channels, hidden)\n', (28674, 28695), False, 'import torch\n'), ((28697, 28728), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (28712, 28728), False, 'import torch\n'), ((28730, 28761), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (28745, 28761), False, 'import torch\n'), ((31041, 31097), 'torch.allclose', 'torch.allclose', (['loss_ddp', 'loss_sharded_optim'], {'rtol': '(0.001)'}), '(loss_ddp, loss_sharded_optim, rtol=0.001)\n', (31055, 31097), False, 'import torch\n'), ((31267, 31316), 'fairscale.utils.testing.check_same_model_params', 'check_same_model_params', (['oss_ddp_model', 'ddp_model'], {}), '(oss_ddp_model, ddp_model)\n', (31290, 31316), False, 'from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu\n'), ((2042, 2060), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (2058, 2060), False, 'import tempfile\n'), ((6878, 6897), 'torch.rand', 'torch.rand', (['size', '(1)'], {}), '(size, 1)\n', (6888, 6897), False, 'import torch\n'), ((7664, 7683), 'torch.rand', 'torch.rand', (['size', '(1)'], {}), '(size, 1)\n', (7674, 7683), False, 'import torch\n'), ((17964, 18006), 'torch.distributed.all_reduce', 'dist.all_reduce', (['loss'], {'group': 'process_group'}), '(loss, group=process_group)\n', (17979, 18006), True, 'import torch.distributed as dist\n'), ((23651, 23687), 'torch.nn.Linear', 'torch.nn.Linear', (['input_width', 'hidden'], {}), '(input_width, hidden)\n', (23666, 23687), False, 'import torch\n'), ((23689, 23720), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (23704, 23720), False, 'import torch\n'), ((28817, 28830), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (28827, 28830), False, 'import torch\n'), ((28882, 28919), 'torch.nn.Linear', 
'torch.nn.Linear', (['hidden', 'out_channels'], {}), '(hidden, out_channels)\n', (28897, 28919), False, 'import torch\n'), ((7162, 7178), 'torch.rand', 'torch.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (7172, 7178), False, 'import torch\n'), ((7952, 7968), 'torch.rand', 'torch.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (7962, 7968), False, 'import torch\n'), ((8383, 8401), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (8399, 8401), False, 'import tempfile\n'), ((9537, 9555), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (9553, 9555), False, 'import tempfile\n'), ((18390, 18444), 'torch.distributed.gather', 'dist.gather', (['p', 'receptacle'], {'dst': '(0)', 'group': 'process_group'}), '(p, receptacle, dst=0, group=process_group)\n', (18401, 18444), True, 'import torch.distributed as dist\n'), ((18948, 18984), 'torch.nn.Linear', 'torch.nn.Linear', (['input_width', 'hidden'], {}), '(input_width, hidden)\n', (18963, 18984), False, 'import torch\n'), ((18986, 19023), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'target_width'], {}), '(hidden, target_width)\n', (19001, 19023), False, 'import torch\n'), ((19385, 19421), 'torch.nn.Linear', 'torch.nn.Linear', (['input_width', 'hidden'], {}), '(input_width, hidden)\n', (19400, 19421), False, 'import torch\n'), ((19423, 19460), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'target_width'], {}), '(hidden, target_width)\n', (19438, 19460), False, 'import torch\n'), ((20603, 20639), 'torch.nn.Linear', 'torch.nn.Linear', (['input_width', 'hidden'], {}), '(input_width, hidden)\n', (20618, 20639), False, 'import torch\n'), ((20653, 20684), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (20668, 20684), False, 'import torch\n'), ((20698, 20735), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'target_width'], {}), '(hidden, target_width)\n', (20713, 20735), False, 'import torch\n'), ((22340, 22370), 'torch.norm', 'torch.norm', (['param.grad'], {'p': 'norm'}), 
'(param.grad, p=norm)\n', (22350, 22370), False, 'import torch\n'), ((30308, 30340), 'torch.rand', 'torch.rand', (['(batch, in_channels)'], {}), '((batch, in_channels))\n', (30318, 30340), False, 'import torch\n'), ((18611, 18642), 'torch.eq', 'torch.eq', (['receptacle[0]', 'sync_p'], {}), '(receptacle[0], sync_p)\n', (18619, 18642), False, 'import torch\n'), ((18755, 18780), 'torch.norm', 'torch.norm', (['receptacle[0]'], {}), '(receptacle[0])\n', (18765, 18780), False, 'import torch\n'), ((18782, 18800), 'torch.norm', 'torch.norm', (['sync_p'], {}), '(sync_p)\n', (18792, 18800), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 11 17:09:03 2021
@author: zongsing.huang
"""
# =============================================================================
# 最佳解的適應值為0
# 統計分析->mean, std
# 即時更新pbest, gbest
# 提早終止:若gbest_F的改善率小於0.1%連續發生0.1*G次,就結束計算
# 用semilogy畫圖
# =============================================================================
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ranksums
def fitness(X):
    """Schwefel 2.22 benchmark: sum(|x_i|) + prod(|x_i|) per row.

    Accepts a single particle (1-D array) or a population (2-D array,
    one particle per row); always returns a 1-D array of fitness values.
    The global optimum is F = 0 at X = 0.
    """
    if X.ndim == 1:
        X = X.reshape(1, -1)
    abs_X = np.abs(X)
    return abs_X.sum(axis=1) + abs_X.prod(axis=1)
#%% Parameter settings
P = 30            # swarm size (number of particles)
D = 30            # problem dimensionality
G = 500           # maximum number of iterations (generations)
k = 0.2           # velocity-clamp factor: v_max = k*(ub-lb)
w_max = 0.9       # initial (maximum) inertia weight
w_min = 0.2       # final (minimum) inertia weight
c1 = 2            # cognitive acceleration coefficient (pull toward pbest)
c2 = 2            # social acceleration coefficient (pull toward gbest)
ub = 10*np.ones([D])     # per-dimension upper bound of the search space
lb = -10*np.ones([D])    # per-dimension lower bound of the search space
T = 10            # number of independent runs for the statistical analysis
SW = True # early-stopping switch
#%% Initialization
v_max = k*(ub-lb)*np.ones([P, D])   # per-particle velocity upper limit
v_min = -1*v_max                    # symmetric velocity lower limit
loss_curve = np.zeros([T, G])       # gbest-fitness history, one row per run
statistical_experiment = np.zeros(T)  # final gbest fitness of each run
#%% Iteration (T independent PSO runs)
for t in range(T):
    # Random initial positions inside [lb, ub]; zero initial velocities.
    X = np.random.uniform(low=lb, high=ub, size=[P, D])
    V = np.zeros([P, D])
    pbest_X = np.zeros([P, D])
    pbest_F = np.ones(P)*np.inf
    gbest_X = np.zeros(D)
    gbest_F = np.inf
    early_stopping = 0
    for g in range(G):
        for p in range(P):
            # Fitness evaluation
            F = fitness(X[p])
            # Update pbest immediately (asynchronous update)
            if F<pbest_F[p]:
                pbest_X[p] = X[p]
                pbest_F[p] = F
            # Update gbest and reset the stagnation counter
            if F<gbest_F:
                gbest_X = X[p]
                gbest_F = F
                early_stopping = 0
            # Update w: inertia weight decays linearly from w_max to w_min
            w = w_max - g*(w_max-w_min)/G
            # Update V: inertia + cognitive + social terms
            r1 = np.random.uniform(size=[D])
            r2 = np.random.uniform(size=[D])
            V[p] = w*V[p] + c1*r1*(pbest_X[p]-X[p]) + c2*r2*(gbest_X-X[p])
            # Velocity clamping to [v_min, v_max]
            mask1 = V[p]>v_max[p]
            mask2 = V[p]<v_min[p]
            V[p, mask1] = v_max[p, mask1]
            V[p, mask2] = v_min[p, mask2]
            # Update X (position)
            X[p] = X[p] + V[p]
            # Position clamping to the search bounds
            mask1 = X[p]>ub
            mask2 = X[p]<lb
            X[p, mask1] = ub[mask1]
            X[p, mask2] = lb[mask2]
        loss_curve[t, g] = gbest_F
        # Count iterations whose relative improvement is <= 0.1%.
        # NOTE(review): at g==0 this compares against loss_curve[t, -1]
        # (last column, still 0), and the ratio divides by gbest_F, which
        # would be zero at the exact optimum -- confirm this is intended.
        if np.abs(loss_curve[t, g]-loss_curve[t, g-1]) / np.abs(loss_curve[t, g])<=1e-3:
            early_stopping = early_stopping + 1
        # Early termination after 0.1*G consecutive stagnant iterations
        if early_stopping>=0.1*G and SW==True:
            break
    statistical_experiment[t] = gbest_F
#%% Plotting (log-scale convergence curve)
if SW==False:
    # No early stopping: every run has a full-length curve, so plot the
    # mean curve over the T runs.
    plt.figure()
    plt.plot(loss_curve.mean(axis=0))
    plt.grid()
    plt.semilogy(base=10)
    plt.xlabel('Iteration')
    plt.ylabel('Fitness')
else:
    # With early stopping each run may terminate at a different iteration,
    # so the loss_curve rows have different effective lengths; plot only
    # the curve of the best of the T runs.
    idx = np.argmin(statistical_experiment)
    plt.figure()
    plt.plot(loss_curve[idx])
    plt.grid()
    plt.semilogy(base=10)
    plt.xlabel('Iteration')
    plt.ylabel('Fitness')
#%% Statistical analysis over the T runs
mean = statistical_experiment.mean()
std = statistical_experiment.std()
#%% Wilcoxon ranksum
# Two-sided rank-sum test between a scaled-down copy of the results and
# the raw results; p < 0.05 means the distributions differ significantly.
assum1 = 0.01*statistical_experiment
assum2 = statistical_experiment
_, pvalue = ranksums(assum1, assum2)
if pvalue<0.05:
    print('assum1 better than assum2~')
| [
"numpy.random.uniform",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.argmin",
"matplotlib.pyplot.figure",
"scipy.stats.ranksums",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((833, 849), 'numpy.zeros', 'np.zeros', (['[T, G]'], {}), '([T, G])\n', (841, 849), True, 'import numpy as np\n'), ((875, 886), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (883, 886), True, 'import numpy as np\n'), ((3086, 3110), 'scipy.stats.ranksums', 'ranksums', (['assum1', 'assum2'], {}), '(assum1, assum2)\n', (3094, 3110), False, 'from scipy.stats import ranksums\n'), ((701, 713), 'numpy.ones', 'np.ones', (['[D]'], {}), '([D])\n', (708, 713), True, 'import numpy as np\n'), ((723, 735), 'numpy.ones', 'np.ones', (['[D]'], {}), '([D])\n', (730, 735), True, 'import numpy as np\n'), ((787, 802), 'numpy.ones', 'np.ones', (['[P, D]'], {}), '([P, D])\n', (794, 802), True, 'import numpy as np\n'), ((922, 969), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'lb', 'high': 'ub', 'size': '[P, D]'}), '(low=lb, high=ub, size=[P, D])\n', (939, 969), True, 'import numpy as np\n'), ((978, 994), 'numpy.zeros', 'np.zeros', (['[P, D]'], {}), '([P, D])\n', (986, 994), True, 'import numpy as np\n'), ((1009, 1025), 'numpy.zeros', 'np.zeros', (['[P, D]'], {}), '([P, D])\n', (1017, 1025), True, 'import numpy as np\n'), ((1072, 1083), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1080, 1083), True, 'import numpy as np\n'), ((2495, 2507), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2505, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2560), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2558, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2586), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {'base': '(10)'}), '(base=10)\n', (2577, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2591, 2614), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2601, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fitness"""'], {}), "('Fitness')\n", (2629, 2640), True, 'import matplotlib.pyplot as plt\n'), ((2725, 2758), 
'numpy.argmin', 'np.argmin', (['statistical_experiment'], {}), '(statistical_experiment)\n', (2734, 2758), True, 'import numpy as np\n'), ((2763, 2775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2773, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2805), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_curve[idx]'], {}), '(loss_curve[idx])\n', (2788, 2805), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2820), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2818, 2820), True, 'import matplotlib.pyplot as plt\n'), ((2825, 2846), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {'base': '(10)'}), '(base=10)\n', (2837, 2846), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2874), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2861, 2874), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2900), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fitness"""'], {}), "('Fitness')\n", (2889, 2900), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1050), 'numpy.ones', 'np.ones', (['P'], {}), '(P)\n', (1047, 1050), True, 'import numpy as np\n'), ((546, 555), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (552, 555), True, 'import numpy as np\n'), ((577, 586), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (583, 586), True, 'import numpy as np\n'), ((1638, 1665), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[D]'}), '(size=[D])\n', (1655, 1665), True, 'import numpy as np\n'), ((1683, 1710), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[D]'}), '(size=[D])\n', (1700, 1710), True, 'import numpy as np\n'), ((2225, 2272), 'numpy.abs', 'np.abs', (['(loss_curve[t, g] - loss_curve[t, g - 1])'], {}), '(loss_curve[t, g] - loss_curve[t, g - 1])\n', (2231, 2272), True, 'import numpy as np\n'), ((2271, 2295), 'numpy.abs', 'np.abs', (['loss_curve[t, g]'], {}), '(loss_curve[t, g])\n', (2277, 2295), True, 'import numpy as np\n')] |
from trueskill import Rating, quality, rate, TrueSkill
import itertools
import math
from .config import *
import numpy as np
def win_probability(ts, team1, team2):
    """Return the probability that ``team1`` beats ``team2``.

    Standard TrueSkill win probability: the difference of the teams'
    summed skill means, normalised by the combined uncertainty
    (per-player variances plus one beta^2 per player), pushed through
    the environment's cumulative distribution function.
    """
    mean_gap = sum(player.mu for player in team1) - sum(player.mu for player in team2)
    total_variance = sum(player.sigma ** 2 for player in itertools.chain(team1, team2))
    player_count = len(team1) + len(team2)
    spread = math.sqrt(player_count * ts.beta ** 2 + total_variance)
    return ts.cdf(mean_gap / spread)
def softmax(x):
    """Numerically stable softmax over axis 0 of ``x``.

    The maximum is subtracted before exponentiation so large scores do
    not overflow; the result is unchanged because softmax is
    shift-invariant.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted, axis=0)
def analyze_players(players, best_match_only):
    """Pick a team composition for 2-4 players and report its balance.

    Evaluates every permutation of the players over the available slots
    (1v1, 1v2 or 2v2), scores each with TrueSkill draw quality, and either
    keeps the best-balanced composition (``best_match_only``) or samples
    one at random with probabilities biased towards balanced match-ups.

    Args:
        players: sequence of 2 to 4 player objects carrying TrueSkill
            rating attributes (``rating_mu``/``rating_sigma`` and their
            offense/defense variants) plus ``slack_username``.
        best_match_only: when True, deterministically return the most
            balanced composition instead of sampling.

    Returns:
        dict with the chosen composition, its match balance and the
        predicted win probability for the blue team, or None when the
        number of players is unsupported.
    """
    if len(players) < 2 or len(players) > 4:
        return
    ts = TrueSkill(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, draw_probability=DRAW_PROB)
    possible_positions = list(itertools.permutations(players, len(players)))
    blue_teams = []
    red_teams = []
    match_balances = []
    for pos in possible_positions:
        if len(pos) == 2:
            # 1v1: a single overall rating per player.
            blue_team = [Rating(mu=pos[0].rating_mu, sigma=pos[0].rating_sigma)]
            red_team = [Rating(mu=pos[1].rating_mu, sigma=pos[1].rating_sigma)]
        if len(pos) == 3:
            # 1v2: the lone blue player uses the overall rating, the red
            # pair uses their positional ratings.
            blue_team = [Rating(mu=pos[0].rating_mu, sigma=pos[0].rating_sigma)]
            red_team = [
                Rating(mu=pos[1].rating_mu_offense, sigma=pos[1].rating_sigma_offense),
                Rating(mu=pos[2].rating_mu_defense, sigma=pos[2].rating_sigma_defense)
            ]
        if len(pos) == 4:
            # 2v2: positional ratings for everybody.
            blue_offense = Rating(mu=pos[0].rating_mu_offense, sigma=pos[0].rating_sigma_offense)
            blue_defense = Rating(mu=pos[1].rating_mu_defense, sigma=pos[1].rating_sigma_defense)
            red_offense = Rating(mu=pos[2].rating_mu_offense, sigma=pos[2].rating_sigma_offense)
            red_defense = Rating(mu=pos[3].rating_mu_defense, sigma=pos[3].rating_sigma_defense)
            blue_team = [blue_offense, blue_defense]
            red_team = [red_offense, red_defense]
        match_balance = ts.quality([blue_team, red_team])
        match_balances.append(match_balance)
        blue_teams.append(blue_team)
        red_teams.append(red_team)
    if best_match_only:
        final_ix = np.argmax(match_balances)
    else:
        # Normalise the balances, sharpen with the exploitation factor and
        # draw one composition (more balanced -> more likely).
        softmaxed = [x / sum(match_balances) for x in match_balances]
        softmaxed = softmax([x * EXPLOITATION_FACTOR for x in softmaxed])
        final_ix = np.random.choice(range(len(softmaxed)), p=softmaxed)
        print(softmaxed)
    final_composition = possible_positions[final_ix]
    # BUG FIX: the returned "match_balance" previously used the loop
    # variable left over from the *last* permutation instead of the balance
    # of the composition actually selected (final_match_balance was
    # computed but never used).
    final_match_balance = match_balances[final_ix]
    final_blue_team = blue_teams[final_ix]
    final_red_team = red_teams[final_ix]
    if len(players) == 2:
        return {
            "optimal_team_composition": {
                "blue": {
                    "offense": final_composition[0].slack_username
                },
                "red": {
                    "offense": final_composition[1].slack_username
                }
            },
            "match_balance": final_match_balance,
            "predicted_win_prob_for_blue": win_probability(ts, final_blue_team, final_red_team)
        }
    if len(players) == 3:
        return {
            "optimal_team_composition": {
                "blue": {
                    "offense": final_composition[0].slack_username
                },
                "red": {
                    "offense": final_composition[1].slack_username,
                    "defense": final_composition[2].slack_username
                }
            },
            "match_balance": final_match_balance,
            "predicted_win_prob_for_blue": win_probability(ts, final_blue_team, final_red_team)
        }
    if len(players) == 4:
        return {
            "optimal_team_composition": {
                "blue": {
                    "offense": final_composition[0].slack_username,
                    "defense": final_composition[1].slack_username
                },
                "red": {
                    "offense": final_composition[2].slack_username,
                    "defense": final_composition[3].slack_username
                }
            },
            "match_balance": final_match_balance,
            "predicted_win_prob_for_blue": win_probability(ts, final_blue_team, final_red_team)
        }
def _build_team(rating1, rating2):
team = []
if rating1 is not None:
team.append(rating1)
if rating2 is not None:
team.append(rating2)
return team
def analyze_teams(player_blue_offense, player_blue_defense, player_red_offense, player_red_defense):
    """Score a fixed 4-slot line-up; any slot may be ``None``.

    Builds per-role TrueSkill ratings for the occupied slots and returns
    the TrueSkill draw quality of the two teams together with the
    predicted win probability for the blue side.
    """
    ts = TrueSkill(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, draw_probability=DRAW_PROB)

    def offense_rating(player):
        # Empty slot -> no rating for that position.
        if player is None:
            return None
        return Rating(mu=player.rating_mu_offense, sigma=player.rating_sigma_offense)

    def defense_rating(player):
        if player is None:
            return None
        return Rating(mu=player.rating_mu_defense, sigma=player.rating_sigma_defense)

    blue_team = _build_team(offense_rating(player_blue_offense), defense_rating(player_blue_defense))
    red_team = _build_team(offense_rating(player_red_offense), defense_rating(player_red_defense))
    return {
        "match_balance": ts.quality([blue_team, red_team]),
        "predicted_win_prob_for_blue": win_probability(ts, blue_team, red_team)
    }
| [
"math.sqrt",
"numpy.argmax",
"numpy.max",
"trueskill.TrueSkill",
"itertools.chain",
"trueskill.Rating"
] | [((353, 395), 'math.sqrt', 'math.sqrt', (['(size * ts.beta ** 2 + sum_sigma)'], {}), '(size * ts.beta ** 2 + sum_sigma)\n', (362, 395), False, 'import math\n'), ((699, 776), 'trueskill.TrueSkill', 'TrueSkill', ([], {'mu': 'MU', 'sigma': 'SIGMA', 'beta': 'BETA', 'tau': 'TAU', 'draw_probability': 'DRAW_PROB'}), '(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, draw_probability=DRAW_PROB)\n', (708, 776), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((4647, 4724), 'trueskill.TrueSkill', 'TrueSkill', ([], {'mu': 'MU', 'sigma': 'SIGMA', 'beta': 'BETA', 'tau': 'TAU', 'draw_probability': 'DRAW_PROB'}), '(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, draw_probability=DRAW_PROB)\n', (4656, 4724), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((2262, 2287), 'numpy.argmax', 'np.argmax', (['match_balances'], {}), '(match_balances)\n', (2271, 2287), True, 'import numpy as np\n'), ((4759, 4860), 'trueskill.Rating', 'Rating', ([], {'mu': 'player_blue_offense.rating_mu_offense', 'sigma': 'player_blue_offense.rating_sigma_offense'}), '(mu=player_blue_offense.rating_mu_offense, sigma=player_blue_offense.\n rating_sigma_offense)\n', (4765, 4860), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((4974, 5075), 'trueskill.Rating', 'Rating', ([], {'mu': 'player_blue_defense.rating_mu_defense', 'sigma': 'player_blue_defense.rating_sigma_defense'}), '(mu=player_blue_defense.rating_mu_defense, sigma=player_blue_defense.\n rating_sigma_defense)\n', (4980, 5075), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((5188, 5287), 'trueskill.Rating', 'Rating', ([], {'mu': 'player_red_offense.rating_mu_offense', 'sigma': 'player_red_offense.rating_sigma_offense'}), '(mu=player_red_offense.rating_mu_offense, sigma=player_red_offense.\n rating_sigma_offense)\n', (5194, 5287), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((5398, 5497), 'trueskill.Rating', 'Rating', ([], {'mu': 
'player_red_defense.rating_mu_defense', 'sigma': 'player_red_defense.rating_sigma_defense'}), '(mu=player_red_defense.rating_mu_defense, sigma=player_red_defense.\n rating_sigma_defense)\n', (5404, 5497), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((536, 545), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (542, 545), True, 'import numpy as np\n'), ((1517, 1587), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[0].rating_mu_offense', 'sigma': 'pos[0].rating_sigma_offense'}), '(mu=pos[0].rating_mu_offense, sigma=pos[0].rating_sigma_offense)\n', (1523, 1587), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1615, 1685), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[1].rating_mu_defense', 'sigma': 'pos[1].rating_sigma_defense'}), '(mu=pos[1].rating_mu_defense, sigma=pos[1].rating_sigma_defense)\n', (1621, 1685), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1713, 1783), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[2].rating_mu_offense', 'sigma': 'pos[2].rating_sigma_offense'}), '(mu=pos[2].rating_mu_offense, sigma=pos[2].rating_sigma_offense)\n', (1719, 1783), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1810, 1880), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[3].rating_mu_defense', 'sigma': 'pos[3].rating_sigma_defense'}), '(mu=pos[3].rating_mu_defense, sigma=pos[3].rating_sigma_defense)\n', (1816, 1880), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((275, 304), 'itertools.chain', 'itertools.chain', (['team1', 'team2'], {}), '(team1, team2)\n', (290, 304), False, 'import itertools\n'), ((1005, 1059), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[0].rating_mu', 'sigma': 'pos[0].rating_sigma'}), '(mu=pos[0].rating_mu, sigma=pos[0].rating_sigma)\n', (1011, 1059), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1085, 1139), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[1].rating_mu', 'sigma': 'pos[1].rating_sigma'}), 
'(mu=pos[1].rating_mu, sigma=pos[1].rating_sigma)\n', (1091, 1139), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1193, 1247), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[0].rating_mu', 'sigma': 'pos[0].rating_sigma'}), '(mu=pos[0].rating_mu, sigma=pos[0].rating_sigma)\n', (1199, 1247), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1290, 1360), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[1].rating_mu_offense', 'sigma': 'pos[1].rating_sigma_offense'}), '(mu=pos[1].rating_mu_offense, sigma=pos[1].rating_sigma_offense)\n', (1296, 1360), False, 'from trueskill import Rating, quality, rate, TrueSkill\n'), ((1378, 1448), 'trueskill.Rating', 'Rating', ([], {'mu': 'pos[2].rating_mu_defense', 'sigma': 'pos[2].rating_sigma_defense'}), '(mu=pos[2].rating_mu_defense, sigma=pos[2].rating_sigma_defense)\n', (1384, 1448), False, 'from trueskill import Rating, quality, rate, TrueSkill\n')] |
import tensorflow as tf
from tensorflow.keras import layers
import tensorflow_probability as tfp
import numpy as np
from dataclasses import dataclass
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras.layers as kl
import tensorflow_probability as tfp
import numpy as np
class ActorCriticNet(tf.keras.Model):
    """Actor-critic network with a policy head and a value head.

    The two heads use separate hidden layers over the same input state:
    dense1 feeds the policy logits, dense2 feeds the state value.
    """

    def __init__(self, action_space):
        """Build the layers.

        Args:
            action_space: number of discrete actions (size of the logits).
        """
        super(ActorCriticNet, self).__init__()
        self.action_space = action_space
        self.dense1 = kl.Dense(100, activation="relu")
        self.dense2 = kl.Dense(100, activation="relu")
        self.values = kl.Dense(1, name="value")
        self.policy_logits = kl.Dense(action_space)

    @tf.function
    def call(self, x):
        """Return ``(state values, policy logits)`` for a batch of states."""
        x1 = self.dense1(x)
        logits = self.policy_logits(x1)
        x2 = self.dense2(x)
        values = self.values(x2)
        return values, logits

    def sample_action(self, state):
        """Sample one action from the current policy for a single state."""
        state = tf.convert_to_tensor(np.atleast_2d(state), dtype=tf.float32)
        _, logits = self(state)
        action_probs = tf.nn.softmax(logits)
        cdist = tfp.distributions.Categorical(probs=action_probs)
        action = cdist.sample()
        return action.numpy()[0]

    def compute_grads(self, states, discounted_rewards):
        """Gradients of the critic loss MSE(discounted_rewards - V(s)).

        Returns:
            List of gradients w.r.t. the trainable variables.
        """
        with tf.GradientTape() as tape:
            # BUG FIX: self(states) returns the tuple (values, logits);
            # the original used the whole tuple in the subtraction below.
            estimated_values, _ = self(states)
            loss = tf.reduce_mean(
                tf.square(discounted_rewards - estimated_values))
        variables = self.trainable_variables
        # BUG FIX: the computed gradients were previously discarded
        # (the method ended without returning or applying them).
        return tape.gradient(loss, variables)
@dataclass
class Step:
    """One environment transition collected while rolling out a trajectory."""
    # Observation before taking the action.
    state: np.ndarray
    # Index of the discrete action taken.
    action: int
    # Immediate reward returned by the environment.
    reward: float
    # Observation after the action.
    next_state: np.ndarray
    # True when the episode terminated at this step.
    done: bool
@dataclass
class GlobalCounter:
    """Mutable step counter shared by all A3C worker agents."""
    # Total number of environment steps taken across all workers.
    n: int = 0
class A3CAgent:
MAX_TRAJECTORY = 5
    def __init__(self, agent_id, env,
                 global_counter, action_space,
                 global_ACNet,
                 gamma, global_history, global_steps_fin):
        """Create one A3C worker with its own environment and local network.

        Args:
            agent_id: identifier used in progress printouts.
            env: gym-style environment (reset/step API).
            global_counter: shared GlobalCounter of total steps.
            action_space: number of discrete actions.
            global_ACNet: shared network that receives the gradient updates.
            gamma: reward discount factor.
            global_history: shared list collecting episode total rewards.
            global_steps_fin: global step budget; workers stop once reached.
        """
        self.agent_id = agent_id
        self.env = env
        self.global_counter = global_counter
        self.action_space = action_space
        self.global_ACNet = global_ACNet
        # Worker-local copy of the network; re-synced with the global one
        # after every update in play().
        self.local_ACNet = ActorCriticNet(self.action_space)
        self.gamma = gamma
        self.global_history = global_history
        self.global_steps_fin = global_steps_fin
        # NOTE(review): 'lr' is a legacy alias of 'learning_rate' in
        # tf.keras optimizers -- confirm the TensorFlow version in use
        # still accepts it.
        self.optimizer = tf.keras.optimizers.Adam(lr=0.0004)
    def play(self, coord):
        """Run the A3C worker loop until the coordinator requests a stop.

        Repeatedly collects short trajectories, computes n-step discounted
        returns, derives gradients from the local network's loss, applies
        them to the *global* network, and copies the global weights back
        into the local network.

        Args:
            coord: tf.train.Coordinator used to stop all workers together.
        """
        self.total_reward = 0
        self.state = self.env.reset()
        try:
            while not coord.should_stop():
                trajectory = self.play_n_steps(N=self.MAX_TRAJECTORY)
                states = [step.state for step in trajectory]
                actions = [step.action for step in trajectory]
                # Bootstrap value R: 0 for a terminal state, otherwise the
                # local critic's estimate of the last next_state.
                if trajectory[-1].done:
                    R = 0
                else:
                    values, _ = self.local_ACNet(
                        tf.convert_to_tensor(np.atleast_2d(trajectory[-1].next_state),
                                             dtype=tf.float32))
                    R = values[0][0].numpy()
                # Walk the trajectory backwards to accumulate discounted
                # returns, then restore chronological order.
                discounted_rewards = []
                for step in reversed(trajectory):
                    R = step.reward + self.gamma * R
                    discounted_rewards.append(R)
                discounted_rewards.reverse()
                with tf.GradientTape() as tape:
                    total_loss = self.compute_loss(states, actions, discounted_rewards)
                grads = tape.gradient(
                    total_loss, self.local_ACNet.trainable_variables)
                # Local gradients are applied to the shared global network,
                # then the local copy is refreshed from it.
                self.optimizer.apply_gradients(
                    zip(grads, self.global_ACNet.trainable_variables))
                self.local_ACNet.set_weights(self.global_ACNet.get_weights())
                if self.global_counter.n >= self.global_steps_fin:
                    coord.request_stop()
        except tf.errors.CancelledError:
            return
    def play_n_steps(self, N):
        """Collect up to ``N`` environment steps as a trajectory.

        Stops early when an episode ends; in that case the episode's total
        reward is logged, appended to the shared history, and the
        environment is reset.

        Args:
            N: maximum number of steps to collect.

        Returns:
            list of Step transitions (length <= N).
        """
        trajectory = []
        for _ in range(N):
            self.global_counter.n += 1
            action = self.local_ACNet.sample_action(self.state)
            next_state, reward, done, info = self.env.step(action)
            step = Step(self.state, action, reward, next_state, done)
            trajectory.append(step)
            if done:
                # Episode finished: report, record and reset.
                print(f"Global step {self.global_counter.n}")
                print(f"Total Reward: {self.total_reward}")
                print(f"Agent: {self.agent_id}")
                print()
                self.global_history.append(self.total_reward)
                self.total_reward = 0
                self.state = self.env.reset()
                break
            else:
                self.total_reward += reward
                self.state = next_state
        return trajectory
    def compute_loss(self, states, actions, discounted_rewards):
        """A3C loss for one trajectory: critic MSE + policy gradient + entropy.

        total_loss = mean(0.5 * (R - V(s))^2 + policy_loss), where the
        policy term is -log pi(a|s) * stop_gradient(advantage) minus an
        entropy bonus (coefficient 0.01) that encourages exploration.

        Args:
            states: list of observations.
            actions: list of action indices taken.
            discounted_rewards: list of n-step discounted returns.
        """
        states = tf.convert_to_tensor(
            np.vstack(states), dtype=tf.float32)
        values, logits = self.local_ACNet(states)
        discounted_rewards = tf.convert_to_tensor(
            np.vstack(discounted_rewards), dtype=tf.float32)
        # Advantage = discounted return - critic estimate.
        advantages = discounted_rewards - values
        value_loss = advantages ** 2
        actions_onehot = tf.one_hot(actions, self.action_space, dtype=tf.float32)
        action_probs = tf.nn.softmax(logits)
        # log pi(a|s): one-hot mask selects the taken action's log-prob;
        # 1e-20 guards against log(0).
        log_action_prob = actions_onehot * tf.math.log(action_probs + 1e-20)
        log_action_prob = tf.reduce_sum(log_action_prob, axis=1, keepdims=True)
        entropy = -1 * tf.reduce_sum(
            action_probs * tf.math.log(action_probs + 1e-20),
            axis=1, keepdims=True)
        # stop_gradient: the advantage acts as a fixed weight for the
        # policy update (no critic gradient through this term).
        policy_loss = tf.reduce_sum(
            log_action_prob * tf.stop_gradient(advantages),
            axis=1, keepdims=True)
        policy_loss += 0.01 * entropy
        policy_loss *= -1
        total_loss = tf.reduce_mean(0.5 * value_loss + policy_loss)
return total_loss | [
"tensorflow.nn.softmax",
"tensorflow.math.log",
"tensorflow.one_hot",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow_probability.distributions.Categorical",
"tensorflow.stop_gradient",
"tensorflow.reduce_mean",
"tensorflow.keras.optimizers.Adam",
"tensorflow.square",
"tens... | [((513, 545), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (521, 545), True, 'import tensorflow.keras.layers as kl\n'), ((568, 600), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (576, 600), True, 'import tensorflow.keras.layers as kl\n'), ((623, 648), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(1)'], {'name': '"""value"""'}), "(1, name='value')\n", (631, 648), True, 'import tensorflow.keras.layers as kl\n'), ((679, 701), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['action_space'], {}), '(action_space)\n', (687, 701), True, 'import tensorflow.keras.layers as kl\n'), ((1076, 1097), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (1089, 1097), True, 'import tensorflow as tf\n'), ((1114, 1163), 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'probs': 'action_probs'}), '(probs=action_probs)\n', (1143, 1163), True, 'import tensorflow_probability as tfp\n'), ((2458, 2493), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.0004)'}), '(lr=0.0004)\n', (2482, 2493), True, 'import tensorflow as tf\n'), ((5354, 5410), 'tensorflow.one_hot', 'tf.one_hot', (['actions', 'self.action_space'], {'dtype': 'tf.float32'}), '(actions, self.action_space, dtype=tf.float32)\n', (5364, 5410), True, 'import tensorflow as tf\n'), ((5435, 5456), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (5448, 5456), True, 'import tensorflow as tf\n'), ((5562, 5615), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_action_prob'], {'axis': '(1)', 'keepdims': '(True)'}), '(log_action_prob, axis=1, keepdims=True)\n', (5575, 5615), True, 'import tensorflow as tf\n'), ((5972, 6018), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(0.5 * value_loss + policy_loss)'], {}), '(0.5 * value_loss + policy_loss)\n', (5986, 6018), True, 
'import tensorflow as tf\n'), ((980, 1000), 'numpy.atleast_2d', 'np.atleast_2d', (['state'], {}), '(state)\n', (993, 1000), True, 'import numpy as np\n'), ((1397, 1414), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1412, 1414), True, 'import tensorflow as tf\n'), ((5039, 5056), 'numpy.vstack', 'np.vstack', (['states'], {}), '(states)\n', (5048, 5056), True, 'import numpy as np\n'), ((5191, 5220), 'numpy.vstack', 'np.vstack', (['discounted_rewards'], {}), '(discounted_rewards)\n', (5200, 5220), True, 'import numpy as np\n'), ((5501, 5534), 'tensorflow.math.log', 'tf.math.log', (['(action_probs + 1e-20)'], {}), '(action_probs + 1e-20)\n', (5512, 5534), True, 'import tensorflow as tf\n'), ((1521, 1569), 'tensorflow.square', 'tf.square', (['(discounted_rewards - estimated_values)'], {}), '(discounted_rewards - estimated_values)\n', (1530, 1569), True, 'import tensorflow as tf\n'), ((5820, 5848), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['advantages'], {}), '(advantages)\n', (5836, 5848), True, 'import tensorflow as tf\n'), ((3441, 3458), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3456, 3458), True, 'import tensorflow as tf\n'), ((5682, 5715), 'tensorflow.math.log', 'tf.math.log', (['(action_probs + 1e-20)'], {}), '(action_probs + 1e-20)\n', (5693, 5715), True, 'import tensorflow as tf\n'), ((3030, 3070), 'numpy.atleast_2d', 'np.atleast_2d', (['trajectory[-1].next_state'], {}), '(trajectory[-1].next_state)\n', (3043, 3070), True, 'import numpy as np\n')] |
import boto3
import botocore
import sshtunnel
import pymysql
import psycopg2
import sqlite3
import pandas as pd
import numpy as np
import re
from tqdm import tqdm
import os
import sys
import getpass
import json
import datetime
import hashlib
import warnings
import csv
from types import SimpleNamespace
from .misc import verbose_display, file_age, write, _get_config, _pycof_folders
# #######################################################################################################################
# Cache data from SQL
def _cache(sql, tunnel, query_type="SELECT", cache_time='24h', cache_file_name=None, verbose=False):
    """Execute a SQL query, caching SELECT results on disk as CSV.

    The query text is hashed to derive the cache file name (unless
    ``cache_file_name`` is given). A cached result younger than
    ``cache_time`` is read back from disk; otherwise the query is executed
    through ``tunnel.connector()`` and both the query and its result are
    (re)written to the PYCOF cache folders.

    Args:
        sql (str): SQL query to execute.
        tunnel: tunnel object exposing ``connector()`` (see SSHTunnel).
        query_type (str): only 'SELECT' queries are served from cache.
        cache_time (str or int or float): maximum cache age; a plain number
            is interpreted as seconds, a string like '24h' carries its unit.
        cache_file_name (str): optional explicit cache file name.
        verbose (bool): display progress information.

    Returns:
        pandas.DataFrame with a ``meta.cache`` namespace describing the
        cache entry (paths, age, creation date).
    """
    # Parse cache_time into a number (c_time) and a unit (age_fmt)
    if type(cache_time) in [float, int]:
        c_time = cache_time
        # BUG FIX: age_fmt was previously left undefined on this branch,
        # raising UnboundLocalError in file_age() below. Plain numbers
        # mean seconds (file_age's default unit).
        age_fmt = 'seconds'
    else:
        # Force the input to be a string
        str_c_time = str(cache_time).lower().replace(' ', '')
        # Get the numerical part of the input
        c_time = float(''.join(re.findall('[^a-z]', str_c_time)))
        # Get the str part of the input - for the format
        age_fmt = ''.join(re.findall('[a-z]', str_c_time))
    # Hash the query to build a stable cache file name
    file_name = hashlib.sha224(bytes(sql, 'utf-8')).hexdigest().replace('-', 'm') if cache_file_name is None else cache_file_name
    # Set the query and data paths
    query_path = _pycof_folders('queries')
    data_path = _pycof_folders('data')
    # Check if the cached data already exists
    if (query_type.upper() == "SELECT") & (file_name in os.listdir(data_path)):
        # If the file exists, check its age
        age = file_age(data_path + file_name, format=age_fmt)
        if (query_type.upper() == "SELECT") & (age < c_time):
            # File is younger than c_time: read the cached data
            verbose_display('Reading cached data', verbose)
            read = pd.read_csv(data_path + file_name, quoting=csv.QUOTE_NONNUMERIC, low_memory=False)
        else:
            # Stale cache: execute the SQL query and refresh query + data
            verbose_display('Execute SQL query and cache the data - updating cache', verbose)
            conn = tunnel.connector()
            read = pd.read_sql(sql, conn)
            conn.close()
            write(sql, query_path + file_name, perm='w', verbose=verbose)
            read.to_csv(data_path + file_name, index=False, quoting=csv.QUOTE_NONNUMERIC)
    else:
        # No cache entry yet: execute SQL and save both query and output
        verbose_display('Execute SQL query and cache the data', verbose)
        conn = tunnel.connector()
        read = pd.read_sql(sql, conn)
        conn.close()
        write(sql, query_path + file_name, perm='w', verbose=verbose)
        read.to_csv(data_path + file_name, index=False, quoting=csv.QUOTE_NONNUMERIC)

    def age(fmt='seconds'):
        # Age of the cache file, in the requested unit
        return file_age(file_path=os.path.join(data_path, file_name), format=fmt)
    # Attach cache metadata to the returned DataFrame
    read.meta = SimpleNamespace()
    read.meta.cache = SimpleNamespace()
    read.meta.cache.creation_date = datetime.datetime.now() - datetime.timedelta(seconds=age())
    read.meta.cache.cache_path = os.path.join(data_path, file_name)
    read.meta.cache.query_path = os.path.join(query_path, file_name)
    read.meta.cache._age_format = age_fmt
    read.meta.cache._age_value = c_time
    read.meta.cache._cache_time = cache_time
    read.meta.cache.age = age
    return read
# #######################################################################################################################
# Get DB credentials
def _get_credentials(config, connection='direct'):
useIAM = connection.lower() == 'iam'
# Access DB credentials
try:
hostname = config.get('DB_HOST') # Read the host name value from the config dictionnary
except Exception:
raise ValueError('Could not get the hostname')
port = config.get('DB_PORT') # Get the port from the config file and convert it to int
user = config.get('DB_USER') # Get the user name for connecting to the DB
password = config.get('DB_PASSWORD') # Get the DB
# AWS and Redshift specific parameters
database = config.get('DB_DATABASE') # For Redshift, use the database, for MySQL set it by default to ""
access_key = config.get("AWS_ACCESS_KEY_ID")
secret_key = config.get("AWS_SECRET_ACCESS_KEY")
region = config.get("REGION")
cluster_name = config.get("CLUSTER_NAME")
boto_error = """Cannot initialize the boto3 session. Please check your config file and ensure awscli is installed.\n
To install awcli, please run: \n
pip install awscli -y && aws configure\n
Values from `aws configure` command can remain empty.
"""
# Get AWS credentials with access and secret key
if (useIAM) & (secret_key in [None, 'None', '']):
try:
session = boto3.Session(profile_name='default')
except Exception:
raise ConnectionError(boto_error)
elif (useIAM):
try:
session = boto3.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=region)
except Exception:
session = boto3.Session(profile_name='default', aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=region)
if useIAM:
rd_client = session.client('redshift')
cluster_creds = rd_client.get_cluster_credentials(
DbUser=user,
DbName=database,
ClusterIdentifier=cluster_name,
AutoCreate=False)
# Update user and password
config['DB_USER'] = cluster_creds['DbUser']
config['DB_PASSWORD'] = cluster_creds['DbPassword']
return config
# #######################################################################################################################
# Fake SSH tunnel for direct connections
class _fake_tunnel:
def __init__():
pass
def close():
pass
# #######################################################################################################################
# Get SSH tunnel
class SSHTunnel:
    """Context manager yielding a DB tunnel (real SSH forwarder or a no-op).

    ``__enter__`` returns an object exposing ``connector()``, which opens
    the actual database connection — Redshift (psycopg2), SQLite (sqlite3)
    or MySQL (pymysql) depending on the configured host, port and engine.
    """
    def __init__(self, config, connection='direct', engine='default'):
        # connection: 'direct' or 'ssh'; engine: 'default', 'redshift' or
        # 'sqlite'/'sqlite3' to force a specific driver.
        self.connection = connection.lower()
        self.config = config
        self.engine = engine
    def __enter__(self):
        # Open the SSH tunnel if requested, otherwise hand back the no-op
        # stand-in; either way the returned object carries .connector().
        if self.connection == 'ssh':
            try:
                # NOTE(review): ssh_port and hostname are computed here but
                # not used below (the forwarder hardcodes port 22) — confirm
                # whether SSH_PORT/DB_LOCAL_HOST should be honoured.
                ssh_port = 22 if self.config.get('SSH_PORT') is None else int(self.config.get('SSH_PORT'))
                remote_addr = 'localhost' if self.config.get('DB_REMOTE_HOST') is None else self.config.get('DB_REMOTE_HOST')
                remote_port = 3306 if self.config.get('DB_REMOTE_PORT') is None else int(self.config.get('DB_REMOTE_PORT'))
                hostname = '127.0.0.1' if self.config.get('DB_LOCAL_HOST') is None else int(self.config.get('DB_LOCAL_HOST'))
                if (self.config.get('SSH_PASSWORD') is None) & (self.config.get('SSH_KEY') is None):
                    # Try to get the default SSH location if neither a password nor a path is provided
                    ssh_path = os.path.join(_pycof_folders('home'), '.ssh', 'id_rsa')
                else:
                    ssh_path = self.config.get('SSH_KEY')
                self.tunnel = sshtunnel.SSHTunnelForwarder((self.config.get('DB_HOST'), 22),
                                                             ssh_username=self.config.get('SSH_USER'),
                                                             ssh_password=self.config.get('SSH_PASSWORD'),
                                                             ssh_pkey=ssh_path,
                                                             remote_bind_address=(remote_addr, remote_port))
                self.tunnel.daemon_forward_servers = True
                self.tunnel.connector = self._define_connector
            except Exception:
                raise ConnectionError('Failed to establish SSH connection with host')
        else:
            self.tunnel = _fake_tunnel
            self.tunnel.connector = self._define_connector
        return self.tunnel
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the tunnel (no-op for direct connections).
        self.tunnel.close()
    def _define_connector(self):
        """Open the SQL connection for executing queries.

        The driver is chosen from the configured host, port and engine:
        Redshift (psycopg2), SQLite (sqlite3) or MySQL (pymysql). For SSH
        connections the tunnel is started first and the connection is
        routed through its local bind port.

        :return: open DB-API connection object.
        """
        hostname = self.config.get('DB_HOST')
        user = self.config.get('DB_USER')
        password = self.config.get('DB_PASSWORD')
        port = self.config.get('DB_PORT')
        database = self.config.get('DB_DATABASE')
        if self.connection.lower() == 'ssh':
            # NOTE(review): ssh_port/remote_addr/remote_port/hostname are
            # recomputed but only the tunnel's local bind port is used
            # below — confirm intent.
            ssh_port = 22 if self.config.get('SSH_PORT') is None else int(self.config.get('SSH_PORT'))
            remote_addr = 'localhost' if self.config.get('DB_REMOTE_HOST') is None else self.config.get('DB_REMOTE_HOST')
            remote_port = 3306 if self.config.get('DB_REMOTE_PORT') is None else int(self.config.get('DB_REMOTE_PORT'))
            hostname = '127.0.0.1' if self.config.get('DB_LOCAL_HOST') is None else int(self.config.get('DB_LOCAL_HOST'))
            self.tunnel.start()
            port = self.tunnel.local_bind_port
        # ### Initiate sql connection to the Database
        # Redshift
        if ('redshift' in hostname.lower().split('.')) or (self.engine.lower() == 'redshift'):
            try:
                connector = psycopg2.connect(host=hostname, port=int(port), user=user, password=password, database=database)
            except Exception:
                raise ConnectionError('Failed to connect to the Redshfit cluster')
        # SQLite
        elif (hostname.lower().find('sqlite') > -1) or (str(port).lower() in ['sqlite', 'sqlite3']) or (self.engine.lower() in ['sqlite', 'sqlite3']):
            try:
                connector = sqlite3.connect(hostname)
            except Exception:
                raise ConnectionError('Failed to connect to the sqlite database')
        # MySQL
        else:
            try:
                # Add new encoder of numpy.float64
                pymysql.converters.encoders[np.float64] = pymysql.converters.escape_float
                pymysql.converters.conversions = pymysql.converters.encoders.copy()
                pymysql.converters.conversions.update(pymysql.converters.decoders)
                # Create connection
                connector = pymysql.connect(host=hostname, port=int(port), user=user, password=password)
            except Exception:
                raise ConnectionError('Failed to connect to the MySQL database')
        return connector
# #######################################################################################################################
# Insert data to DB
def _insert_data(data, table, connector, autofill_nan=False, verbose=False):
# Check if user defined the table to publish
if table == "":
raise SyntaxError('Destination table not defined by user')
# Create the column string and the number of columns used for push query
columns_string = (', ').join(list(data.columns))
col_num = len(list(data.columns)) - 1
# calculate the size of the dataframe to be pushed
num = len(data)
batches = int(num / 10000) + 1
# #######################################################################################################################
# Transform date columns to str before loading
warnings.filterwarnings('ignore') # Removing filter warning when changing data type
dt_cols = []
for col in data.columns:
if data[col].dtype == 'object':
try:
data.loc[:, col] = pd.to_datetime(data[col]).apply(str)
dt_cols += [col]
except ValueError:
pass
elif data[col].dtype in [np.dtype('<M8[ns]'), np.dtype('datetime64[ns]')]:
dt_cols += [col]
data.loc[:, col] = data[col].apply(str)
warnings.filterwarnings('default') # Putting warning back
# #######################################################################################################################
# Fill Nan values if requested by user
if autofill_nan:
"""
For each row of the dataset, we fill the NaN values
with a specific string that will be replaced by None
value (converted by NULL in MySQL). This aims at avoiding
the PyMySQL 1054 error.
"""
data_load = []
for ls in [v for v in data.fillna('@@@@EMPTYDATA@@@@').values.tolist()]:
data_load += [[None if vv == '@@@@EMPTYDATA@@@@' else vv for vv in ls]]
else:
data_load = data.values.tolist()
# #######################################################################################################################
# Push 10k batches iterativeley and then push the remainder
if type(connector) == sqlite3.Connection:
insert_string = f'INSERT INTO {table} ({columns_string}) VALUES ({"?, "*col_num} ? )'
else:
insert_string = f'INSERT INTO {table} ({columns_string}) VALUES ({"%s, "*col_num} %s )'
if num == 0:
raise ValueError('len(data) == 0 -> No data to insert')
elif num > 10000:
rg = tqdm(range(0, batches - 1)) if verbose else range(0, batches - 1)
cursor = connector.cursor()
for i in rg:
cursor.executemany(insert_string, data_load[i * 10000:(i + 1) * 10000])
connector.commit()
# Push the remainder
cursor.executemany(insert_string, data_load[(batches - 1) * 10000:])
connector.commit()
else:
# Push everything if less then 10k (SQL Server limit)
cursor = connector.cursor()
cursor.executemany(insert_string, data_load)
connector.commit()
| [
"os.listdir",
"boto3.Session",
"warnings.filterwarnings",
"pandas.read_csv",
"pymysql.converters.encoders.copy",
"numpy.dtype",
"datetime.datetime.now",
"pymysql.converters.conversions.update",
"re.findall",
"sqlite3.connect",
"pandas.to_datetime",
"pandas.read_sql",
"os.path.join",
"types... | [((2906, 2923), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '()\n', (2921, 2923), False, 'from types import SimpleNamespace\n'), ((2946, 2963), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '()\n', (2961, 2963), False, 'from types import SimpleNamespace\n'), ((3093, 3127), 'os.path.join', 'os.path.join', (['data_path', 'file_name'], {}), '(data_path, file_name)\n', (3105, 3127), False, 'import os\n'), ((3161, 3196), 'os.path.join', 'os.path.join', (['query_path', 'file_name'], {}), '(query_path, file_name)\n', (3173, 3196), False, 'import os\n'), ((11750, 11783), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (11773, 11783), False, 'import warnings\n'), ((12263, 12297), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""default"""'], {}), "('default')\n", (12286, 12297), False, 'import warnings\n'), ((2578, 2600), 'pandas.read_sql', 'pd.read_sql', (['sql', 'conn'], {}), '(sql, conn)\n', (2589, 2600), True, 'import pandas as pd\n'), ((3000, 3023), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3021, 3023), False, 'import datetime\n'), ((1042, 1073), 're.findall', 're.findall', (['"""[a-z]"""', 'str_c_time'], {}), "('[a-z]', str_c_time)\n", (1052, 1073), False, 'import re\n'), ((1484, 1505), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1494, 1505), False, 'import os\n'), ((1822, 1909), 'pandas.read_csv', 'pd.read_csv', (['(data_path + file_name)'], {'quoting': 'csv.QUOTE_NONNUMERIC', 'low_memory': '(False)'}), '(data_path + file_name, quoting=csv.QUOTE_NONNUMERIC, low_memory\n =False)\n', (1833, 1909), True, 'import pandas as pd\n'), ((2145, 2167), 'pandas.read_sql', 'pd.read_sql', (['sql', 'conn'], {}), '(sql, conn)\n', (2156, 2167), True, 'import pandas as pd\n'), ((4827, 4864), 'boto3.Session', 'boto3.Session', ([], {'profile_name': '"""default"""'}), "(profile_name='default')\n", (4840, 4864), False, 'import boto3\n'), ((924, 956), 
're.findall', 're.findall', (['"""[^a-z]"""', 'str_c_time'], {}), "('[^a-z]', str_c_time)\n", (934, 956), False, 'import re\n'), ((2841, 2875), 'os.path.join', 'os.path.join', (['data_path', 'file_name'], {}), '(data_path, file_name)\n', (2853, 2875), False, 'import os\n'), ((4991, 5093), 'boto3.Session', 'boto3.Session', ([], {'aws_access_key_id': 'access_key', 'aws_secret_access_key': 'secret_key', 'region_name': 'region'}), '(aws_access_key_id=access_key, aws_secret_access_key=\n secret_key, region_name=region)\n', (5004, 5093), False, 'import boto3\n'), ((5137, 5262), 'boto3.Session', 'boto3.Session', ([], {'profile_name': '"""default"""', 'aws_access_key_id': 'access_key', 'aws_secret_access_key': 'secret_key', 'region_name': 'region'}), "(profile_name='default', aws_access_key_id=access_key,\n aws_secret_access_key=secret_key, region_name=region)\n", (5150, 5262), False, 'import boto3\n'), ((10156, 10181), 'sqlite3.connect', 'sqlite3.connect', (['hostname'], {}), '(hostname)\n', (10171, 10181), False, 'import sqlite3\n'), ((10531, 10565), 'pymysql.converters.encoders.copy', 'pymysql.converters.encoders.copy', ([], {}), '()\n', (10563, 10565), False, 'import pymysql\n'), ((10582, 10648), 'pymysql.converters.conversions.update', 'pymysql.converters.conversions.update', (['pymysql.converters.decoders'], {}), '(pymysql.converters.decoders)\n', (10619, 10648), False, 'import pymysql\n'), ((12128, 12147), 'numpy.dtype', 'np.dtype', (['"""<M8[ns]"""'], {}), "('<M8[ns]')\n", (12136, 12147), True, 'import numpy as np\n'), ((12149, 12175), 'numpy.dtype', 'np.dtype', (['"""datetime64[ns]"""'], {}), "('datetime64[ns]')\n", (12157, 12175), True, 'import numpy as np\n'), ((11973, 11998), 'pandas.to_datetime', 'pd.to_datetime', (['data[col]'], {}), '(data[col])\n', (11987, 11998), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
VOC2012 preprocessing to speed up training
"""
import os
import sys
from glob import glob
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from models.SSD300 import SSD300
from .VOC2012ManagerObjDetection import VOC2012ManagerObjDetection
sys.path.insert(1, '../')
def saveGTdata(voc2012path, output_path):
    """Precompute ground-truth training data and save it as .npy batches.

    For each batch of the VOC2012 dataset, computes the images and the
    ground-truth confidences/locations (w.r.t. the SSD300 default boxes)
    and writes three numpy files per batch into ``output_path``.

    Args:
        - (str) voc2012path: path to the VOC2012 dataset root
        - (str) output_path: directory in which the .npy files are written
    """
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    db_manager = VOC2012ManagerObjDetection(voc2012path + '/', batch_size=32, floatType=32)
    SSD300_model = SSD300(21, floatType=32)
    for i, batch in enumerate(tqdm(db_manager.batches)):
        # get data from batch
        imgs, confs, locs = db_manager.getImagesAndGtSpeedUp(batch, SSD300_model.default_boxes)
        np.save(os.path.join(output_path, "imgs_{:05d}.npy".format(i)), imgs, allow_pickle=True)
        np.save(os.path.join(output_path, "confs_{:05d}.npy".format(i)), confs, allow_pickle=True)
        # BUGFIX: the path separator was missing here ("locs_" was appended
        # directly to output_path), so the locs files were written next to
        # the output directory instead of inside it and were never found by
        # loadGTdata's glob(path + "/locs*.npy").
        np.save(os.path.join(output_path, "locs_{:05d}.npy".format(i)), locs, allow_pickle=True)
def loadGTdata(path, nb_data_to_load=-1):
    """
    Method to load needed data for training
    N: batch size
    B: batch size
    O: number of objects in a given image
    D: number of default boxes
    Args:
        - (str) path to load
    Return:
        - (list of tf.Tensor) images (B, 300, 300, 3)
        - (list of tf.Tensor) confs gt (N, B, O, D)
        - (list of tf.Tensor) locs gt (N, B, O, D, 4)
    """
    def _load_batch_files(pattern):
        # Load every saved batch file matching `pattern`, in sorted order,
        # and wrap each one as a tf tensor.
        batch_files = sorted(glob(path + pattern))[:nb_data_to_load]
        return [tf.convert_to_tensor(np.load(batch_file, allow_pickle=True))
                for batch_file in tqdm(batch_files)]

    imgs = _load_batch_files("/imgs*.npy")
    confs = _load_batch_files("/confs*.npy")
    locs = _load_batch_files("/locs*.npy")
    return imgs, confs, locs
def loadSpecificGTdata(path, idx):
    """
    Method to load a particular batch
    B: batch size
    N: number of objects in a given image
    D: number of default boxes
    Args:
        - (str) path to load
    Return:
        - (list of tf.Tensor) images (B, 300, 300, 3)
        - (list of tf.Tensor) confs gt (B, N, D)
        - (list of tf.Tensor) locs gt (B, N, D, 4)
    """
    def _load_one(pattern):
        # Pick the idx-th file (in sorted order) matching `pattern`.
        batch_file = sorted(glob(path + pattern))[idx]
        return tf.convert_to_tensor(np.load(batch_file, allow_pickle=True))

    imgs = _load_one("/imgs*.npy")
    confs = _load_one("/confs*.npy")
    locs = _load_one("/locs*.npy")
    return imgs, confs, locs
| [
"os.mkdir",
"tqdm.tqdm",
"numpy.load",
"os.path.exists",
"sys.path.insert",
"models.SSD300.SSD300",
"glob.glob"
] | [((311, 336), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../"""'], {}), "(1, '../')\n", (326, 336), False, 'import sys\n'), ((687, 711), 'models.SSD300.SSD300', 'SSD300', (['(21)'], {'floatType': '(32)'}), '(21, floatType=32)\n', (693, 711), False, 'from models.SSD300 import SSD300\n'), ((517, 544), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (531, 544), False, 'import os\n'), ((554, 575), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (562, 575), False, 'import os\n'), ((742, 766), 'tqdm.tqdm', 'tqdm', (['db_manager.batches'], {}), '(db_manager.batches)\n', (746, 766), False, 'from tqdm import tqdm\n'), ((2675, 2708), 'numpy.load', 'np.load', (['batch'], {'allow_pickle': '(True)'}), '(batch, allow_pickle=True)\n', (2682, 2708), True, 'import numpy as np\n'), ((2796, 2829), 'numpy.load', 'np.load', (['batch'], {'allow_pickle': '(True)'}), '(batch, allow_pickle=True)\n', (2803, 2829), True, 'import numpy as np\n'), ((2915, 2948), 'numpy.load', 'np.load', (['batch'], {'allow_pickle': '(True)'}), '(batch, allow_pickle=True)\n', (2922, 2948), True, 'import numpy as np\n'), ((2611, 2636), 'glob.glob', 'glob', (["(path + '/imgs*.npy')"], {}), "(path + '/imgs*.npy')\n", (2615, 2636), False, 'from glob import glob\n'), ((2730, 2756), 'glob.glob', 'glob', (["(path + '/confs*.npy')"], {}), "(path + '/confs*.npy')\n", (2734, 2756), False, 'from glob import glob\n'), ((2851, 2876), 'glob.glob', 'glob', (["(path + '/locs*.npy')"], {}), "(path + '/locs*.npy')\n", (2855, 2876), False, 'from glob import glob\n'), ((1620, 1645), 'glob.glob', 'glob', (["(path + '/imgs*.npy')"], {}), "(path + '/imgs*.npy')\n", (1624, 1645), False, 'from glob import glob\n'), ((1738, 1771), 'numpy.load', 'np.load', (['batch'], {'allow_pickle': '(True)'}), '(batch, allow_pickle=True)\n', (1745, 1771), True, 'import numpy as np\n'), ((1819, 1845), 'glob.glob', 'glob', (["(path + '/confs*.npy')"], {}), "(path + '/confs*.npy')\n", (1823, 
1845), False, 'from glob import glob\n'), ((1939, 1972), 'numpy.load', 'np.load', (['batch'], {'allow_pickle': '(True)'}), '(batch, allow_pickle=True)\n', (1946, 1972), True, 'import numpy as np\n'), ((2019, 2044), 'glob.glob', 'glob', (["(path + '/locs*.npy')"], {}), "(path + '/locs*.npy')\n", (2023, 2044), False, 'from glob import glob\n'), ((2137, 2170), 'numpy.load', 'np.load', (['batch'], {'allow_pickle': '(True)'}), '(batch, allow_pickle=True)\n', (2144, 2170), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import argparse
import os
from os.path import join
import numpy as np
import torch
import pandas as pd
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data import Dataset
from keras.preprocessing.sequence import pad_sequences
from utils import feature_utils
from utils import data_utils
from utils import settings
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # include timestamp
class AffCNNMatchDataset(Dataset):
    """Affiliation-matching dataset for CNN models.

    Each (AMiner, MAG) affiliation-name pair is turned into two word
    similarity matrices — a "long" one over the truncated raw word
    sequences and a "short" one over only the words shared by both names —
    and the whole set is split 60/20/20 into train/valid/test pickles
    under ``DATA_DIR/dom-adpt``.
    """

    def __init__(self, file_dir, matrix_size1, matrix_size2, seed, shuffle, args, use_emb=True):
        self.file_dir = file_dir
        self.matrix_size_1_long = matrix_size1
        self.matrix_size_2_short = matrix_size2
        self.use_emb = use_emb
        if self.use_emb:
            # Pretrained word embeddings used for soft (cosine) word matching.
            self.pretrain_emb = torch.load(os.path.join(settings.OUT_DIR, "rnn_init_word_emb.emb"))
        self.tokenizer = data_utils.load_large_obj(settings.OUT_DIR, "tokenizer_all_domain.pkl")

        # Positive pairs from human labels; entries labelled "[NIF]"
        # (presumably "no match found" — confirm with the labelling tool)
        # are dropped.
        pos_pairs = data_utils.load_json(file_dir, "label_data_aff_zhoushao.json")[:600]
        pos_pairs = [({"name": p["affiliation"]}, {"DisplayName": p["label"]}) for p in pos_pairs if p["label"] != "[NIF]"]
        neg_pairs = data_utils.load_json(file_dir, 'train_negative_affi_clean.json')[:600]
        neg_pairs = [(p['aminer_affi'], p['mag_affi']) for p in neg_pairs]
        # Additional hard examples, labelled "1"/"0" in the "label_zfj" field.
        pairs_add = data_utils.load_json(file_dir, "mag_aminer_hard_correct_zfj_copy.json")
        print("add pairs", len(pairs_add))
        pos_pairs += [(p['aminer_affi'], p['mag_affi']) for p in pairs_add if p["label_zfj"] == "1"]
        neg_pairs += [(p['aminer_affi'], p['mag_affi']) for p in pairs_add if p["label_zfj"] == "0"]
        pos_pairs = pos_pairs[-len(neg_pairs):]  # no balance for relation exp part
        labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)
        print("n_pos", len(pos_pairs), "n_neg", len(neg_pairs))
        pairs = pos_pairs + neg_pairs  # label balanced is important

        n_matrix = len(pairs)
        self.X_long = np.zeros((n_matrix, self.matrix_size_1_long, self.matrix_size_1_long))
        self.X_short = np.zeros((n_matrix, self.matrix_size_2_short, self.matrix_size_2_short))
        # BUGFIX: np.long (alias of the builtin int) was deprecated in
        # NumPy 1.20 and removed in 1.24; use the explicit integer dtype.
        self.Y = np.zeros(n_matrix, dtype=np.int64)
        count = 0
        for i, pair in enumerate(pairs):
            if i % 100 == 0:
                print('pairs to matrices', i)
            item_a, item_m = pair
            cur_y = labels[i]
            matrix1 = self.sentences_long_to_matrix(item_a['name'], item_m['DisplayName'])
            self.X_long[count] = feature_utils.scale_matrix(matrix1)
            matrix2 = self.sentences_short_to_matrix_2(item_a['name'], item_m['DisplayName'])
            self.X_short[count] = feature_utils.scale_matrix(matrix2)
            self.Y[count] = cur_y
            count += 1

        print("shuffle", shuffle)
        if shuffle:
            self.X_long, self.X_short, self.Y = sklearn.utils.shuffle(
                self.X_long, self.X_short, self.Y,
                random_state=seed
            )

        # 60/20/20 split; slicing order is train | valid | test.
        self.N = len(self.Y)
        N = self.N
        n_train = int(self.N*0.6)
        n_valid = int(self.N*0.2)
        n_test = N - n_train - n_valid
        train_data = {}
        train_data["x1"] = self.X_long[:n_train]
        train_data["x2"] = self.X_short[:n_train]
        train_data["y"] = self.Y[:n_train]
        print("train labels", len(train_data["y"]))
        test_data = {}
        test_data["x1"] = self.X_long[(n_train+n_valid): (n_train+n_valid+n_test)]
        test_data["x2"] = self.X_short[(n_train+n_valid): (n_train+n_valid+n_test)]
        test_data["y"] = self.Y[(n_train+n_valid): (n_train+n_valid+n_test)]
        print("test labels", len(test_data["y"]), test_data["y"])
        valid_data = {}
        valid_data["x1"] = self.X_long[n_train:(n_train+n_valid)]
        valid_data["x2"] = self.X_short[n_train:(n_train+n_valid)]
        valid_data["y"] = self.Y[n_train:(n_train+n_valid)]
        print("valid labels", len(valid_data["y"]), valid_data["y"])

        out_dir = join(settings.DATA_DIR, "dom-adpt")
        os.makedirs(out_dir, exist_ok=True)
        data_utils.dump_large_obj(train_data, out_dir, "aff_train.pkl")
        data_utils.dump_large_obj(test_data, out_dir, "aff_test.pkl")
        data_utils.dump_large_obj(valid_data, out_dir, "aff_valid.pkl")

    def __len__(self):
        return self.N

    def __getitem__(self, idx):
        return self.X_long[idx], self.X_short[idx], self.Y[idx]

    def sentences_long_to_matrix(self, title1, title2):
        """Build the word-similarity matrix over the truncated raw word
        sequences of the two titles: 1 for identical words, embedding
        cosine similarity when ``use_emb``, and -1 otherwise (padding
        cells stay -1)."""
        if self.use_emb:
            twords1 = self.tokenizer.texts_to_sequences([title1])[0][: self.matrix_size_1_long]
            twords2 = self.tokenizer.texts_to_sequences([title2])[0][: self.matrix_size_1_long]
        else:
            twords1 = feature_utils.get_words(title1)[: self.matrix_size_1_long]
            twords2 = feature_utils.get_words(title2)[: self.matrix_size_1_long]
        matrix = -np.ones((self.matrix_size_1_long, self.matrix_size_1_long))
        for i, word1 in enumerate(twords1):
            for j, word2 in enumerate(twords2):
                v = -1
                if word1 == word2:
                    v = 1
                elif self.use_emb:
                    v = cosine_similarity(self.pretrain_emb[word1].reshape(1, -1),
                                          self.pretrain_emb[word2].reshape(1, -1))[0][0]
                matrix[i][j] = v
        return matrix

    def sentences_short_to_matrix_2(self, title1, title2):
        """Like ``sentences_long_to_matrix``, but restricted to the words
        that occur in both titles (keyword overlap, order preserved),
        truncated to the short matrix size."""
        if self.use_emb:
            twords1 = self.tokenizer.texts_to_sequences([title1])[0]
            twords2 = self.tokenizer.texts_to_sequences([title2])[0]
        else:
            twords1 = title1.split()
            twords2 = title2.split()
        overlap = set(twords1).intersection(twords2)
        new_seq_mag = []
        new_seq_aminer = []
        for w in twords1:
            if w in overlap:
                new_seq_mag.append(w)
        for w in twords2:
            if w in overlap:
                new_seq_aminer.append(w)
        twords1 = new_seq_mag[: self.matrix_size_2_short]
        twords2 = new_seq_aminer[: self.matrix_size_2_short]
        matrix = -np.ones((self.matrix_size_2_short, self.matrix_size_2_short))
        for i, word1 in enumerate(twords1):
            for j, word2 in enumerate(twords2):
                v = -1
                if word1 == word2:
                    v = 1
                elif self.use_emb:
                    v = cosine_similarity(self.pretrain_emb[word1].reshape(1, -1),
                                          self.pretrain_emb[word2].reshape(1, -1))[0][0]
                matrix[i][j] = v
        return matrix
class AffRNNMatchDataset(Dataset):
    """Affiliation-matching dataset for RNN models.

    Builds padded word-index sequences for (AMiner, MAG) affiliation-name
    pairs plus "keyword" sequences containing only the word indices shared
    by both names, then splits 60/20/20 into train/valid/test pickles
    under ``DATA_DIR/dom-adpt``.
    """

    def __init__(self, file_dir, max_seq1_len, max_seq2_len, shuffle, seed, args):
        # max_seq1_len: padded length of the full name sequences;
        # max_seq2_len: padded length of the overlap-keyword sequences.
        self.max_seq1_len = max_seq1_len
        self.max_seq2_len = max_seq2_len
        # Positive pairs from human labels; entries labelled "[NIF]"
        # (presumably "no match found" — confirm with the labelling tool)
        # are dropped.
        pos_pairs = data_utils.load_json(file_dir, "label_data_aff_zhoushao.json")[:600]
        pos_pairs = [({"name": p["affiliation"]}, {"DisplayName": p["label"]}) for p in pos_pairs if p["label"] != "[NIF]"]
        neg_pairs = data_utils.load_json(file_dir, 'train_negative_affi_clean.json')[:600]
        neg_pairs = [(p['aminer_affi'], p['mag_affi']) for p in neg_pairs]
        # Additional hard examples, labelled "1"/"0" in "label_zfj".
        pairs_add = data_utils.load_json(file_dir, "mag_aminer_hard_correct_zfj_copy.json")
        print("add pairs", len(pairs_add))
        pos_pairs += [(p['aminer_affi'], p['mag_affi']) for p in pairs_add if p["label_zfj"] == "1"]
        neg_pairs += [(p['aminer_affi'], p['mag_affi']) for p in pairs_add if p["label_zfj"] == "0"]
        # Truncate positives so both classes have the same size.
        pos_pairs = pos_pairs[-len(neg_pairs):]
        self.labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)
        print("n_pos", len(pos_pairs), "n_neg", len(neg_pairs))
        pairs = pos_pairs + neg_pairs  # label balanced is important
        t = data_utils.load_large_obj(settings.OUT_DIR, "tokenizer_all_domain.pkl")
        self.vocab_size = len(t.word_counts)
        print("vocab size", self.vocab_size)
        self.mag = t.texts_to_sequences([p[1]["DisplayName"] for p in pairs])
        # Sanity check: no token index may exceed the configured vocab cap.
        for mag_aff in self.mag:
            for word_idx in mag_aff:
                assert word_idx <= settings.MAX_WORD_TOKEN_NUM + 1
        self.aminer = t.texts_to_sequences([p[0]["name"] for p in pairs])
        self.mag = pad_sequences(self.mag, maxlen=self.max_seq1_len)
        self.aminer = pad_sequences(self.aminer, maxlen=self.max_seq1_len)
        # NOTE(review): keyword overlap is computed on the already padded
        # sequences, so padding zeros count as shared "words" — verify this
        # is intended.
        self.calc_keyword_seqs()
        self.mag_keywords = pad_sequences(self.mag_keywords, maxlen=max_seq2_len)
        self.aminer_keywords = pad_sequences(self.aminer_keywords, maxlen=max_seq2_len)
        if shuffle:
            # Shuffle all five arrays in lockstep so rows stay aligned.
            self.mag, self.aminer, self.mag_keywords, self.aminer_keywords, self.labels = sklearn.utils.shuffle(
                self.mag, self.aminer, self.mag_keywords, self.aminer_keywords, self.labels,
                random_state=seed
            )
        # 60/20/20 split; slicing order is train | valid | test.
        self.N = len(self.labels)
        N = self.N
        n_train = int(self.N*0.6)
        n_valid = int(self.N*0.2)
        n_test = N - n_train - n_valid
        # n_train = 800
        # n_valid = 200
        # n_test = 200
        train_data = {}
        train_data["x1_seq1"] = self.mag[:n_train]
        train_data["x1_seq2"] = self.mag_keywords[:n_train]
        train_data["x2_seq1"] = self.aminer[:n_train]
        train_data["x2_seq2"] = self.aminer_keywords[:n_train]
        train_data["y"] = self.labels[:n_train]
        train_data["vocab_size"] = self.vocab_size
        print("train labels", len(train_data["y"]))
        test_data = {}
        test_data["x1_seq1"] = self.mag[(n_train+n_valid):(n_train+n_valid+n_test)]
        test_data["x1_seq2"] = self.mag_keywords[(n_train+n_valid):(n_train+n_valid+n_test)]
        test_data["x2_seq1"] = self.aminer[(n_train+n_valid):(n_train+n_valid+n_test)]
        test_data["x2_seq2"] = self.aminer_keywords[(n_train+n_valid):(n_train+n_valid+n_test)]
        test_data["y"] = self.labels[(n_train+n_valid):(n_train+n_valid+n_test)]
        print("test labels", len(test_data["y"]))
        valid_data = {}
        valid_data["x1_seq1"] = self.mag[n_train:(n_train+n_valid)]
        valid_data["x1_seq2"] = self.mag_keywords[n_train:(n_train+n_valid)]
        valid_data["x2_seq1"] = self.aminer[n_train:(n_train+n_valid)]
        valid_data["x2_seq2"] = self.aminer_keywords[n_train:(n_train+n_valid)]
        valid_data["y"] = self.labels[n_train:(n_train+n_valid)]
        print("valid labels", len(valid_data["y"]))
        out_dir = join(settings.DATA_DIR, "dom-adpt")
        os.makedirs(out_dir, exist_ok=True)
        data_utils.dump_large_obj(train_data, out_dir, "aff_rnn_train.pkl")
        data_utils.dump_large_obj(test_data, out_dir, "aff_rnn_test.pkl")
        data_utils.dump_large_obj(valid_data, out_dir, "aff_rnn_valid.pkl")

    def calc_keyword_seqs(self):
        """For each pair, keep only the word indices that appear in both
        the MAG and the AMiner sequence (order preserved, duplicates
        kept); results go to self.mag_keywords / self.aminer_keywords."""
        N = len(self.mag)
        mag_keywords = []
        aminer_keywords = []
        for i in range(N):
            cur_v_mag = self.mag[i]
            cur_v_aminer = self.aminer[i]
            overlap = set(cur_v_mag).intersection(cur_v_aminer)
            new_seq_mag = []
            new_seq_aminer = []
            for w in cur_v_mag:
                if w in overlap:
                    new_seq_mag.append(w)
            for w in cur_v_aminer:
                if w in overlap:
                    new_seq_aminer.append(w)
            mag_keywords.append(new_seq_mag)
            aminer_keywords.append(new_seq_aminer)
        self.mag_keywords = mag_keywords
        self.aminer_keywords = aminer_keywords
        # print("mag keywords", self.mag_keywords)
if __name__ == "__main__":
    # Command-line entry point: build both dataset variants, which write
    # their train/valid/test pickles as a side effect of construction.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--file-dir', type=str, default=settings.AFF_DATA_DIR, help="Input file directory")
    arg_parser.add_argument('--matrix-size1', type=int, default=7, help='Matrix size 1.')
    arg_parser.add_argument('--matrix-size2', type=int, default=4, help='Matrix size 2.')
    arg_parser.add_argument('--train-num', type=int, default=600, help='Training size.')
    arg_parser.add_argument('--test-num', type=int, default=200, help='Testing size.')
    arg_parser.add_argument('--seed', type=int, default=42, help='Random seed.')
    arg_parser.add_argument('--shuffle', action='store_true', default=True, help="Shuffle dataset")
    arg_parser.add_argument('--max-sequence-length', type=int, default=17,
                            help="Max sequence length for raw sequences")
    arg_parser.add_argument('--max-key-sequence-length', type=int, default=8,
                            help="Max key sequence length for key sequences")
    args = arg_parser.parse_args()

    dataset = AffCNNMatchDataset(args.file_dir, args.matrix_size1, args.matrix_size2,
                                 args.seed, shuffle=True, args=args, use_emb=False)
    dataset = AffRNNMatchDataset(args.file_dir, args.max_sequence_length,
                                 args.max_key_sequence_length, shuffle=True,
                                 seed=args.seed, args=args)
| [
"utils.data_utils.load_json",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"keras.preprocessing.sequence.pad_sequences",
"utils.data_utils.dump_large_obj",
"numpy.zeros",
"utils.feature_utils.scale_matrix",
"utils.data_utils.load_large_obj",
"numpy.ones",
"sklearn.utils.shuff... | [((529, 556), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (546, 556), False, 'import logging\n'), ((557, 630), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (576, 630), False, 'import logging\n'), ((12072, 12097), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12095, 12097), False, 'import argparse\n'), ((1098, 1169), 'utils.data_utils.load_large_obj', 'data_utils.load_large_obj', (['settings.OUT_DIR', '"""tokenizer_all_domain.pkl"""'], {}), "(settings.OUT_DIR, 'tokenizer_all_domain.pkl')\n", (1123, 1169), False, 'from utils import data_utils\n'), ((1570, 1641), 'utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""mag_aminer_hard_correct_zfj_copy.json"""'], {}), "(file_dir, 'mag_aminer_hard_correct_zfj_copy.json')\n", (1590, 1641), False, 'from utils import data_utils\n'), ((2218, 2288), 'numpy.zeros', 'np.zeros', (['(n_matrix, self.matrix_size_1_long, self.matrix_size_1_long)'], {}), '((n_matrix, self.matrix_size_1_long, self.matrix_size_1_long))\n', (2226, 2288), True, 'import numpy as np\n'), ((2312, 2384), 'numpy.zeros', 'np.zeros', (['(n_matrix, self.matrix_size_2_short, self.matrix_size_2_short)'], {}), '((n_matrix, self.matrix_size_2_short, self.matrix_size_2_short))\n', (2320, 2384), True, 'import numpy as np\n'), ((2402, 2435), 'numpy.zeros', 'np.zeros', (['n_matrix'], {'dtype': 'np.long'}), '(n_matrix, dtype=np.long)\n', (2410, 2435), True, 'import numpy as np\n'), ((4328, 4363), 'os.path.join', 'join', (['settings.DATA_DIR', '"""dom-adpt"""'], {}), "(settings.DATA_DIR, 'dom-adpt')\n", (4332, 4363), False, 'from os.path import join\n'), ((4372, 4407), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (4383, 4407), False, 'import os\n'), ((4416, 4479), 
'utils.data_utils.dump_large_obj', 'data_utils.dump_large_obj', (['train_data', 'out_dir', '"""aff_train.pkl"""'], {}), "(train_data, out_dir, 'aff_train.pkl')\n", (4441, 4479), False, 'from utils import data_utils\n'), ((4488, 4549), 'utils.data_utils.dump_large_obj', 'data_utils.dump_large_obj', (['test_data', 'out_dir', '"""aff_test.pkl"""'], {}), "(test_data, out_dir, 'aff_test.pkl')\n", (4513, 4549), False, 'from utils import data_utils\n'), ((4558, 4621), 'utils.data_utils.dump_large_obj', 'data_utils.dump_large_obj', (['valid_data', 'out_dir', '"""aff_valid.pkl"""'], {}), "(valid_data, out_dir, 'aff_valid.pkl')\n", (4583, 4621), False, 'from utils import data_utils\n'), ((7658, 7729), 'utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""mag_aminer_hard_correct_zfj_copy.json"""'], {}), "(file_dir, 'mag_aminer_hard_correct_zfj_copy.json')\n", (7678, 7729), False, 'from utils import data_utils\n'), ((8236, 8307), 'utils.data_utils.load_large_obj', 'data_utils.load_large_obj', (['settings.OUT_DIR', '"""tokenizer_all_domain.pkl"""'], {}), "(settings.OUT_DIR, 'tokenizer_all_domain.pkl')\n", (8261, 8307), False, 'from utils import data_utils\n'), ((8708, 8757), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['self.mag'], {'maxlen': 'self.max_seq1_len'}), '(self.mag, maxlen=self.max_seq1_len)\n', (8721, 8757), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((8780, 8832), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['self.aminer'], {'maxlen': 'self.max_seq1_len'}), '(self.aminer, maxlen=self.max_seq1_len)\n', (8793, 8832), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((8896, 8949), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['self.mag_keywords'], {'maxlen': 'max_seq2_len'}), '(self.mag_keywords, maxlen=max_seq2_len)\n', (8909, 8949), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((8981, 9037), 
'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['self.aminer_keywords'], {'maxlen': 'max_seq2_len'}), '(self.aminer_keywords, maxlen=max_seq2_len)\n', (8994, 9037), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((10924, 10959), 'os.path.join', 'join', (['settings.DATA_DIR', '"""dom-adpt"""'], {}), "(settings.DATA_DIR, 'dom-adpt')\n", (10928, 10959), False, 'from os.path import join\n'), ((10968, 11003), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (10979, 11003), False, 'import os\n'), ((11012, 11079), 'utils.data_utils.dump_large_obj', 'data_utils.dump_large_obj', (['train_data', 'out_dir', '"""aff_rnn_train.pkl"""'], {}), "(train_data, out_dir, 'aff_rnn_train.pkl')\n", (11037, 11079), False, 'from utils import data_utils\n'), ((11088, 11153), 'utils.data_utils.dump_large_obj', 'data_utils.dump_large_obj', (['test_data', 'out_dir', '"""aff_rnn_test.pkl"""'], {}), "(test_data, out_dir, 'aff_rnn_test.pkl')\n", (11113, 11153), False, 'from utils import data_utils\n'), ((11162, 11229), 'utils.data_utils.dump_large_obj', 'data_utils.dump_large_obj', (['valid_data', 'out_dir', '"""aff_rnn_valid.pkl"""'], {}), "(valid_data, out_dir, 'aff_rnn_valid.pkl')\n", (11187, 11229), False, 'from utils import data_utils\n'), ((1191, 1253), 'utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""label_data_aff_zhoushao.json"""'], {}), "(file_dir, 'label_data_aff_zhoushao.json')\n", (1211, 1253), False, 'from utils import data_utils\n'), ((1404, 1468), 'utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""train_negative_affi_clean.json"""'], {}), "(file_dir, 'train_negative_affi_clean.json')\n", (1424, 1468), False, 'from utils import data_utils\n'), ((2758, 2793), 'utils.feature_utils.scale_matrix', 'feature_utils.scale_matrix', (['matrix1'], {}), '(matrix1)\n', (2784, 2793), False, 'from utils import feature_utils\n'), ((2922, 2957), 
'utils.feature_utils.scale_matrix', 'feature_utils.scale_matrix', (['matrix2'], {}), '(matrix2)\n', (2948, 2957), False, 'from utils import feature_utils\n'), ((3118, 3193), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['self.X_long', 'self.X_short', 'self.Y'], {'random_state': 'seed'}), '(self.X_long, self.X_short, self.Y, random_state=seed)\n', (3139, 3193), False, 'import sklearn\n'), ((5234, 5293), 'numpy.ones', 'np.ones', (['(self.matrix_size_1_long, self.matrix_size_1_long)'], {}), '((self.matrix_size_1_long, self.matrix_size_1_long))\n', (5241, 5293), True, 'import numpy as np\n'), ((6516, 6577), 'numpy.ones', 'np.ones', (['(self.matrix_size_2_short, self.matrix_size_2_short)'], {}), '((self.matrix_size_2_short, self.matrix_size_2_short))\n', (6523, 6577), True, 'import numpy as np\n'), ((7279, 7341), 'utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""label_data_aff_zhoushao.json"""'], {}), "(file_dir, 'label_data_aff_zhoushao.json')\n", (7299, 7341), False, 'from utils import data_utils\n'), ((7492, 7556), 'utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""train_negative_affi_clean.json"""'], {}), "(file_dir, 'train_negative_affi_clean.json')\n", (7512, 7556), False, 'from utils import data_utils\n'), ((9149, 9271), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['self.mag', 'self.aminer', 'self.mag_keywords', 'self.aminer_keywords', 'self.labels'], {'random_state': 'seed'}), '(self.mag, self.aminer, self.mag_keywords, self.\n aminer_keywords, self.labels, random_state=seed)\n', (9170, 9271), False, 'import sklearn\n'), ((1016, 1071), 'os.path.join', 'os.path.join', (['settings.OUT_DIR', '"""rnn_init_word_emb.emb"""'], {}), "(settings.OUT_DIR, 'rnn_init_word_emb.emb')\n", (1028, 1071), False, 'import os\n'), ((5075, 5106), 'utils.feature_utils.get_words', 'feature_utils.get_words', (['title1'], {}), '(title1)\n', (5098, 5106), False, 'from utils import feature_utils\n'), ((5156, 5187), 
'utils.feature_utils.get_words', 'feature_utils.get_words', (['title2'], {}), '(title2)\n', (5179, 5187), False, 'from utils import feature_utils\n')] |
import tensorflow as tf
from tensorflow.python.ops.rnn import dynamic_rnn
import numpy as np
import sonnet as snt
from matplotlib.colors import to_rgb
import matplotlib.patches as patches
import shutil
import os
from dps import cfg
from dps.utils import Param
from dps.utils.tf import RNNCell, tf_mean_sum
from auto_yolo.tf_ops import render_sprites
from auto_yolo.models import yolo_air
from auto_yolo.models.core import xent_loss, AP, VariationalAutoencoder, normal_vae
class BboxCell(RNNCell):
    """RNN cell that, at step t, emits the normalized bounding box
    (yt, xt, ys, xs) of the connected component with label t.

    Driven by dynamic_rnn over the object labels 1..n_objects; the cell is
    effectively stateless (the 1-element state is never read).
    """

    def __init__(self, components, batch_indices_for_boxes, image_height, image_width):
        # components: integer connected-component labelling of the batch of
        #   images (output of tf.contrib.image.connected_components).
        # batch_indices_for_boxes: for each object label, the index of the
        #   batch element it belongs to (precomputed by the caller).
        self.components = components
        self.batch_indices_for_boxes = batch_indices_for_boxes
        self.image_height = image_height
        self.image_width = image_width

    def __call__(self, t, state, scope=None):
        """ t is the index of the object in the whole batch, batch_idx is the index of the
        batch element that the object belongs to, which we have pre-computed """
        # t has shape (1, 1) — dynamic_rnn is run with a dummy batch dim of
        # size 1, time-major; labels are 1-based hence the -1.
        batch_idx = self.batch_indices_for_boxes[t[0, 0]-1]
        # (row, col) coordinates of every pixel carrying label t in that image.
        nonzero_indices = tf.where(tf.equal(self.components[batch_idx, :, :], t[0, 0]))
        maxs = tf.reduce_max(nonzero_indices, axis=0)
        mins = tf.reduce_min(nonzero_indices, axis=0)
        # Normalized top-left corner and height/width of the tight box.
        yt = mins[0] / self.image_height
        xt = mins[1] / self.image_width
        ys = (maxs[0] - mins[0]) / self.image_height
        xs = (maxs[1] - mins[1]) / self.image_width
        return tf.to_float(tf.stack([yt, xt, ys, xs])[None, :]), state

    @property
    def state_size(self):
        return 1

    @property
    def output_size(self):
        return 4

    def zero_state(self, batch_size, dtype):
        return tf.zeros((batch_size, 1), dtype=dtype)
class Baseline_Network(VariationalAutoencoder):
cc_threshold = Param()
object_shape = Param()
object_encoder = None
object_decoder = None
def __init__(self, env, updater, scope=None, **kwargs):
ap_iou_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
self.eval_funcs = {"AP_at_point_{}".format(int(10 * v)): AP(v) for v in ap_iou_values}
self.eval_funcs["AP"] = AP(ap_iou_values)
super(Baseline_Network, self).__init__(env, updater, scope=scope, **kwargs)
def _build_program_generator(self):
assert len(self.inp.shape) == 4
mask = tf.reduce_sum(tf.abs(self.inp - self._tensors["background"]), axis=3) >= self.cc_threshold
components = tf.contrib.image.connected_components(mask)
total_n_objects = tf.to_int32(tf.reduce_max(components))
indices = tf.range(1, total_n_objects + 1)
maxs = tf.reduce_max(components, axis=(1, 2))
# So that we don't pick up zeros.
for_mins = tf.where(mask, components, (total_n_objects + 1) * tf.ones_like(components))
mins = tf.reduce_min(for_mins, axis=(1, 2))
n_objects = tf.to_int32(tf.maximum((maxs - mins) + 1, 0))
under = indices[None, :] <= maxs[:, None]
over = indices[None, :] >= mins[:, None]
both = tf.to_int32(tf.logical_and(under, over))
batch_indices_for_objects = tf.argmax(both, axis=0)
assert_valid_batch_indices = tf.Assert(
tf.reduce_all(tf.equal(tf.reduce_sum(both, axis=0), 1)),
[both], name="assert_valid_batch_indices")
with tf.control_dependencies([assert_valid_batch_indices]):
batch_indices_for_objects = tf.identity(batch_indices_for_objects)
cell = BboxCell(components, batch_indices_for_objects, self.image_height, self.image_width)
# For each object, get its bounding box by using `indices` to figure out which element of
# `components` the object appears in, and then check that element
object_bboxes, _ = dynamic_rnn(
cell, indices[:, None, None], initial_state=cell.zero_state(1, tf.float32),
parallel_iterations=10, swap_memory=False, time_major=True)
# Couldn't I have just iterated through all object indices and used tf.where on `components` to simultaneously
# get both the bounding box and the batch index? Yes, but I think I thought that would be expensive
# (have to look through the entirety of `components` once for each object).
# Get rid of dummy batch dim created for dynamic_rnn
object_bboxes = object_bboxes[:, 0, :]
obj = tf.sequence_mask(n_objects)
routing = tf.reshape(tf.to_int32(obj), (-1,))
routing = tf.cumsum(routing, exclusive=True)
routing = tf.reshape(routing, tf.shape(obj))
obj = tf.to_float(obj[:, :, None])
self._tensors["normalized_box"] = tf.gather(object_bboxes, routing, axis=0)
self._tensors["obj"] = obj
self._tensors["n_objects"] = n_objects
self._tensors["max_objects"] = tf.reduce_max(n_objects)
def _build_program_interpreter(self):
# --- Get object attributes using object encoder ---
max_objects = self._tensors["max_objects"]
yt, xt, ys, xs = tf.split(self._tensors["normalized_box"], 4, axis=-1)
transform_constraints = snt.AffineWarpConstraints.no_shear_2d()
warper = snt.AffineGridWarper(
(self.image_height, self.image_width), self.object_shape, transform_constraints)
_boxes = tf.concat([xs, 2*(xt + xs/2) - 1, ys, 2*(yt + ys/2) - 1], axis=-1)
_boxes = tf.reshape(_boxes, (self.batch_size * max_objects, 4))
grid_coords = warper(_boxes)
grid_coords = tf.reshape(grid_coords, (self.batch_size, max_objects, *self.object_shape, 2,))
glimpse = tf.contrib.resampler.resampler(self.inp, grid_coords)
self._tensors["glimpse"] = tf.reshape(
glimpse, (self.batch_size, max_objects, *self.object_shape, self.image_depth))
object_encoder_in = tf.reshape(
glimpse, (self.batch_size * max_objects, *self.object_shape, self.image_depth))
attr = self.object_encoder(object_encoder_in, (1, 1, 2*self.A), self.is_training)
attr = tf.reshape(attr, (self.batch_size, max_objects, 2*self.A))
attr_mean, attr_log_std = tf.split(attr, [self.A, self.A], axis=-1)
attr_std = tf.exp(attr_log_std)
if not self.noisy:
attr_std = tf.zeros_like(attr_std)
attr, attr_kl = normal_vae(attr_mean, attr_std, self.attr_prior_mean, self.attr_prior_std)
if "attr" in self.no_gradient:
attr = tf.stop_gradient(attr)
attr_kl = tf.stop_gradient(attr_kl)
self._tensors["attr"] = tf.reshape(attr, (self.batch_size, max_objects, self.A))
self._tensors["attr_kl"] = tf.reshape(attr_kl, (self.batch_size, max_objects, self.A))
object_decoder_in = tf.reshape(attr, (self.batch_size * max_objects, 1, 1, self.A))
# --- Compute sprites from attr using object decoder ---
object_logits = self.object_decoder(
object_decoder_in, self.object_shape + (self.image_depth,), self.is_training)
objects = tf.nn.sigmoid(tf.clip_by_value(object_logits, -10., 10.))
self._tensors["objects"] = tf.reshape(
objects, (self.batch_size, max_objects, *self.object_shape, self.image_depth,))
objects = tf.reshape(objects, (self.batch_size, max_objects, *self.object_shape, self.image_depth,))
alpha = self._tensors["obj"][:, :, :, None, None] * tf.ones_like(objects[:, :, :, :, :1])
importance = tf.ones_like(objects[:, :, :, :, :1])
objects = tf.concat([objects, alpha, importance], axis=-1)
# -- Reconstruct image ---
scales = tf.concat([ys, xs], axis=-1)
scales = tf.reshape(scales, (self.batch_size, max_objects, 2))
offsets = tf.concat([yt, xt], axis=-1)
offsets = tf.reshape(offsets, (self.batch_size, max_objects, 2))
output = render_sprites.render_sprites(
objects,
self._tensors["n_objects"],
scales,
offsets,
self._tensors["background"]
)
self._tensors['output'] = output
    def build_representation(self):
        """Construct the model graph for one batch.

        Builds the program generator, lazily creates the object
        encoder/decoder (optionally freezing their weights), runs the
        program interpreter, registers tensors to record, and populates
        ``self.losses`` with the reconstruction and attribute-KL terms.
        """
        # --- build graph ---
        self._build_program_generator()
        # Encoder/decoder are created on first use so a subclass (or an
        # earlier stage) may supply them already constructed.
        if self.object_encoder is None:
            self.object_encoder = cfg.build_object_encoder(scope="object_encoder")
        if "object_encoder" in self.fixed_weights:
            self.object_encoder.fix_variables()
        if self.object_decoder is None:
            self.object_decoder = cfg.build_object_decoder(scope="object_decoder")
        if "object_decoder" in self.fixed_weights:
            self.object_decoder.fix_variables()
        self._build_program_interpreter()
        # --- specify values to record ---
        self.record_tensors(
            n_objects=self._tensors["n_objects"],
            attr=self._tensors["attr"]
        )
        # --- losses ---
        if self.train_reconstruction:
            output = self._tensors['output']
            inp = self._tensors['inp']
            # Per-pixel cross-entropy between the rendered output and the input.
            self._tensors['per_pixel_reconstruction_loss'] = xent_loss(pred=output, label=inp)
            self.losses['reconstruction'] = (
                self.reconstruction_weight
                * tf_mean_sum(self._tensors['per_pixel_reconstruction_loss'])
            )
        if self.train_kl:
            obj = self._tensors["obj"]
            # KL term is weighted per-object by its presence value `obj`.
            self.losses['attr_kl'] = self.kl_weight * tf_mean_sum(obj * self._tensors["attr_kl"])
        # --- other evaluation metrics
        if "n_annotations" in self._tensors:
            # Absolute error between predicted and annotated object counts.
            count_1norm = tf.to_float(
                tf.abs(tf.to_int32(self._tensors["n_objects"]) - self._tensors["n_annotations"]))
            self.record_tensors(
                count_1norm=count_1norm,
                count_error=count_1norm > 0.5
            )
class Baseline_RenderHook(yolo_air.YoloAir_RenderHook):
    """Render hook: saves a figure per example showing each generated
    object patch (top row) above the input glimpse it came from (bottom row)."""
    fetches = "obj inp output objects n_objects normalized_box glimpse"

    def _plot_patches(self, updater, fetched, N):
        """Plot sampled patches for the first N examples and write PDFs."""
        import matplotlib.pyplot as plt

        glimpse = fetched.get('glimpse', None)
        objects = fetched['objects']
        obj = fetched['obj']
        counts = obj.sum(axis=(1, 2)).astype('i')
        colour_on = np.array(to_rgb("xkcd:azure"))
        colour_off = np.array(to_rgb("xkcd:red"))

        for example in range(N):
            n_on = counts[example]
            fig, axes = plt.subplots(2, n_on, figsize=(20, 20))
            # reshape guards the n_on == 1 case where subplots returns a 1-D array
            axes = np.array(axes).reshape(2, n_on)

            for col in range(n_on):
                weight = obj[example, col, 0]
                top = axes[0, col]
                top.imshow(objects[example, col, :, :, :], vmin=0.0, vmax=1.0)
                # Side bar colour interpolates between "on" and "off" colours
                # according to the object's presence weight.
                blended = weight * colour_on + (1 - weight) * colour_off
                top.add_patch(patches.Rectangle(
                    (1, 0), 0.2, 1, clip_on=False, transform=top.transAxes, facecolor=blended))
                bottom = axes[1, col]
                bottom.set_title("input glimpse")
                bottom.imshow(glimpse[example, col, :, :, :], vmin=0.0, vmax=1.0)

            plt.subplots_adjust(left=0.02, right=.98, top=.98, bottom=0.02, wspace=0.1, hspace=0.1)
            local_step = np.inf if cfg.overwrite_plots else "{:0>10}".format(updater.n_updates)
            path = updater.exp_dir.path_for(
                'plots',
                'sampled_patches', str(example),
                'stage={:0>4}_local_step={}.pdf'.format(updater.stage_idx, local_step))
            fig.savefig(path)
            plt.close(fig)
            # Also keep a stable "latest" copy alongside the stamped file.
            shutil.copyfile(
                path,
                os.path.join(os.path.dirname(path), 'latest_stage{:0>4}.pdf'.format(updater.stage_idx)))
| [
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.cumsum",
"tensorflow.identity",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.zeros_like",
"auto_yolo.tf_ops.render_sprites.render_sprites",
"dps.cfg.build_object_decoder",
"tensorflow.reduce_max",
"tensorflow.split",
"... | [((1795, 1802), 'dps.utils.Param', 'Param', ([], {}), '()\n', (1800, 1802), False, 'from dps.utils import Param\n'), ((1822, 1829), 'dps.utils.Param', 'Param', ([], {}), '()\n', (1827, 1829), False, 'from dps.utils import Param\n'), ((1156, 1194), 'tensorflow.reduce_max', 'tf.reduce_max', (['nonzero_indices'], {'axis': '(0)'}), '(nonzero_indices, axis=0)\n', (1169, 1194), True, 'import tensorflow as tf\n'), ((1210, 1248), 'tensorflow.reduce_min', 'tf.reduce_min', (['nonzero_indices'], {'axis': '(0)'}), '(nonzero_indices, axis=0)\n', (1223, 1248), True, 'import tensorflow as tf\n'), ((1687, 1725), 'tensorflow.zeros', 'tf.zeros', (['(batch_size, 1)'], {'dtype': 'dtype'}), '((batch_size, 1), dtype=dtype)\n', (1695, 1725), True, 'import tensorflow as tf\n'), ((2141, 2158), 'auto_yolo.models.core.AP', 'AP', (['ap_iou_values'], {}), '(ap_iou_values)\n', (2143, 2158), False, 'from auto_yolo.models.core import xent_loss, AP, VariationalAutoencoder, normal_vae\n'), ((2452, 2495), 'tensorflow.contrib.image.connected_components', 'tf.contrib.image.connected_components', (['mask'], {}), '(mask)\n', (2489, 2495), True, 'import tensorflow as tf\n'), ((2580, 2612), 'tensorflow.range', 'tf.range', (['(1)', '(total_n_objects + 1)'], {}), '(1, total_n_objects + 1)\n', (2588, 2612), True, 'import tensorflow as tf\n'), ((2629, 2667), 'tensorflow.reduce_max', 'tf.reduce_max', (['components'], {'axis': '(1, 2)'}), '(components, axis=(1, 2))\n', (2642, 2667), True, 'import tensorflow as tf\n'), ((2822, 2858), 'tensorflow.reduce_min', 'tf.reduce_min', (['for_mins'], {'axis': '(1, 2)'}), '(for_mins, axis=(1, 2))\n', (2835, 2858), True, 'import tensorflow as tf\n'), ((3119, 3142), 'tensorflow.argmax', 'tf.argmax', (['both'], {'axis': '(0)'}), '(both, axis=0)\n', (3128, 3142), True, 'import tensorflow as tf\n'), ((4374, 4401), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['n_objects'], {}), '(n_objects)\n', (4390, 4401), True, 'import tensorflow as tf\n'), ((4474, 4508), 
'tensorflow.cumsum', 'tf.cumsum', (['routing'], {'exclusive': '(True)'}), '(routing, exclusive=True)\n', (4483, 4508), True, 'import tensorflow as tf\n'), ((4576, 4604), 'tensorflow.to_float', 'tf.to_float', (['obj[:, :, None]'], {}), '(obj[:, :, None])\n', (4587, 4604), True, 'import tensorflow as tf\n'), ((4648, 4689), 'tensorflow.gather', 'tf.gather', (['object_bboxes', 'routing'], {'axis': '(0)'}), '(object_bboxes, routing, axis=0)\n', (4657, 4689), True, 'import tensorflow as tf\n'), ((4811, 4835), 'tensorflow.reduce_max', 'tf.reduce_max', (['n_objects'], {}), '(n_objects)\n', (4824, 4835), True, 'import tensorflow as tf\n'), ((5018, 5071), 'tensorflow.split', 'tf.split', (["self._tensors['normalized_box']", '(4)'], {'axis': '(-1)'}), "(self._tensors['normalized_box'], 4, axis=-1)\n", (5026, 5071), True, 'import tensorflow as tf\n'), ((5105, 5144), 'sonnet.AffineWarpConstraints.no_shear_2d', 'snt.AffineWarpConstraints.no_shear_2d', ([], {}), '()\n', (5142, 5144), True, 'import sonnet as snt\n'), ((5162, 5268), 'sonnet.AffineGridWarper', 'snt.AffineGridWarper', (['(self.image_height, self.image_width)', 'self.object_shape', 'transform_constraints'], {}), '((self.image_height, self.image_width), self.\n object_shape, transform_constraints)\n', (5182, 5268), True, 'import sonnet as snt\n'), ((5295, 5369), 'tensorflow.concat', 'tf.concat', (['[xs, 2 * (xt + xs / 2) - 1, ys, 2 * (yt + ys / 2) - 1]'], {'axis': '(-1)'}), '([xs, 2 * (xt + xs / 2) - 1, ys, 2 * (yt + ys / 2) - 1], axis=-1)\n', (5304, 5369), True, 'import tensorflow as tf\n'), ((5379, 5433), 'tensorflow.reshape', 'tf.reshape', (['_boxes', '(self.batch_size * max_objects, 4)'], {}), '(_boxes, (self.batch_size * max_objects, 4))\n', (5389, 5433), True, 'import tensorflow as tf\n'), ((5493, 5571), 'tensorflow.reshape', 'tf.reshape', (['grid_coords', '(self.batch_size, max_objects, *self.object_shape, 2)'], {}), '(grid_coords, (self.batch_size, max_objects, *self.object_shape, 2))\n', (5503, 5571), True, 
'import tensorflow as tf\n'), ((5591, 5644), 'tensorflow.contrib.resampler.resampler', 'tf.contrib.resampler.resampler', (['self.inp', 'grid_coords'], {}), '(self.inp, grid_coords)\n', (5621, 5644), True, 'import tensorflow as tf\n'), ((5681, 5775), 'tensorflow.reshape', 'tf.reshape', (['glimpse', '(self.batch_size, max_objects, *self.object_shape, self.image_depth)'], {}), '(glimpse, (self.batch_size, max_objects, *self.object_shape, self\n .image_depth))\n', (5691, 5775), True, 'import tensorflow as tf\n'), ((5813, 5907), 'tensorflow.reshape', 'tf.reshape', (['glimpse', '(self.batch_size * max_objects, *self.object_shape, self.image_depth)'], {}), '(glimpse, (self.batch_size * max_objects, *self.object_shape,\n self.image_depth))\n', (5823, 5907), True, 'import tensorflow as tf\n'), ((6023, 6083), 'tensorflow.reshape', 'tf.reshape', (['attr', '(self.batch_size, max_objects, 2 * self.A)'], {}), '(attr, (self.batch_size, max_objects, 2 * self.A))\n', (6033, 6083), True, 'import tensorflow as tf\n'), ((6116, 6157), 'tensorflow.split', 'tf.split', (['attr', '[self.A, self.A]'], {'axis': '(-1)'}), '(attr, [self.A, self.A], axis=-1)\n', (6124, 6157), True, 'import tensorflow as tf\n'), ((6177, 6197), 'tensorflow.exp', 'tf.exp', (['attr_log_std'], {}), '(attr_log_std)\n', (6183, 6197), True, 'import tensorflow as tf\n'), ((6298, 6372), 'auto_yolo.models.core.normal_vae', 'normal_vae', (['attr_mean', 'attr_std', 'self.attr_prior_mean', 'self.attr_prior_std'], {}), '(attr_mean, attr_std, self.attr_prior_mean, self.attr_prior_std)\n', (6308, 6372), False, 'from auto_yolo.models.core import xent_loss, AP, VariationalAutoencoder, normal_vae\n'), ((6536, 6592), 'tensorflow.reshape', 'tf.reshape', (['attr', '(self.batch_size, max_objects, self.A)'], {}), '(attr, (self.batch_size, max_objects, self.A))\n', (6546, 6592), True, 'import tensorflow as tf\n'), ((6628, 6687), 'tensorflow.reshape', 'tf.reshape', (['attr_kl', '(self.batch_size, max_objects, self.A)'], {}), '(attr_kl, 
(self.batch_size, max_objects, self.A))\n', (6638, 6687), True, 'import tensorflow as tf\n'), ((6717, 6780), 'tensorflow.reshape', 'tf.reshape', (['attr', '(self.batch_size * max_objects, 1, 1, self.A)'], {}), '(attr, (self.batch_size * max_objects, 1, 1, self.A))\n', (6727, 6780), True, 'import tensorflow as tf\n'), ((7096, 7190), 'tensorflow.reshape', 'tf.reshape', (['objects', '(self.batch_size, max_objects, *self.object_shape, self.image_depth)'], {}), '(objects, (self.batch_size, max_objects, *self.object_shape, self\n .image_depth))\n', (7106, 7190), True, 'import tensorflow as tf\n'), ((7219, 7313), 'tensorflow.reshape', 'tf.reshape', (['objects', '(self.batch_size, max_objects, *self.object_shape, self.image_depth)'], {}), '(objects, (self.batch_size, max_objects, *self.object_shape, self\n .image_depth))\n', (7229, 7313), True, 'import tensorflow as tf\n'), ((7429, 7466), 'tensorflow.ones_like', 'tf.ones_like', (['objects[:, :, :, :, :1]'], {}), '(objects[:, :, :, :, :1])\n', (7441, 7466), True, 'import tensorflow as tf\n'), ((7485, 7533), 'tensorflow.concat', 'tf.concat', (['[objects, alpha, importance]'], {'axis': '(-1)'}), '([objects, alpha, importance], axis=-1)\n', (7494, 7533), True, 'import tensorflow as tf\n'), ((7588, 7616), 'tensorflow.concat', 'tf.concat', (['[ys, xs]'], {'axis': '(-1)'}), '([ys, xs], axis=-1)\n', (7597, 7616), True, 'import tensorflow as tf\n'), ((7634, 7687), 'tensorflow.reshape', 'tf.reshape', (['scales', '(self.batch_size, max_objects, 2)'], {}), '(scales, (self.batch_size, max_objects, 2))\n', (7644, 7687), True, 'import tensorflow as tf\n'), ((7707, 7735), 'tensorflow.concat', 'tf.concat', (['[yt, xt]'], {'axis': '(-1)'}), '([yt, xt], axis=-1)\n', (7716, 7735), True, 'import tensorflow as tf\n'), ((7754, 7808), 'tensorflow.reshape', 'tf.reshape', (['offsets', '(self.batch_size, max_objects, 2)'], {}), '(offsets, (self.batch_size, max_objects, 2))\n', (7764, 7808), True, 'import tensorflow as tf\n'), ((7827, 7943), 
'auto_yolo.tf_ops.render_sprites.render_sprites', 'render_sprites.render_sprites', (['objects', "self._tensors['n_objects']", 'scales', 'offsets', "self._tensors['background']"], {}), "(objects, self._tensors['n_objects'], scales,\n offsets, self._tensors['background'])\n", (7856, 7943), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((1087, 1138), 'tensorflow.equal', 'tf.equal', (['self.components[batch_idx, :, :]', 't[0, 0]'], {}), '(self.components[batch_idx, :, :], t[0, 0])\n', (1095, 1138), True, 'import tensorflow as tf\n'), ((2079, 2084), 'auto_yolo.models.core.AP', 'AP', (['v'], {}), '(v)\n', (2081, 2084), False, 'from auto_yolo.models.core import xent_loss, AP, VariationalAutoencoder, normal_vae\n'), ((2535, 2560), 'tensorflow.reduce_max', 'tf.reduce_max', (['components'], {}), '(components)\n', (2548, 2560), True, 'import tensorflow as tf\n'), ((2892, 2922), 'tensorflow.maximum', 'tf.maximum', (['(maxs - mins + 1)', '(0)'], {}), '(maxs - mins + 1, 0)\n', (2902, 2922), True, 'import tensorflow as tf\n'), ((3054, 3081), 'tensorflow.logical_and', 'tf.logical_and', (['under', 'over'], {}), '(under, over)\n', (3068, 3081), True, 'import tensorflow as tf\n'), ((3330, 3383), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_valid_batch_indices]'], {}), '([assert_valid_batch_indices])\n', (3353, 3383), True, 'import tensorflow as tf\n'), ((3425, 3463), 'tensorflow.identity', 'tf.identity', (['batch_indices_for_objects'], {}), '(batch_indices_for_objects)\n', (3436, 3463), True, 'import tensorflow as tf\n'), ((4431, 4447), 'tensorflow.to_int32', 'tf.to_int32', (['obj'], {}), '(obj)\n', (4442, 4447), True, 'import tensorflow as tf\n'), ((4547, 4560), 'tensorflow.shape', 'tf.shape', (['obj'], {}), '(obj)\n', (4555, 4560), True, 'import tensorflow as tf\n'), ((6249, 6272), 'tensorflow.zeros_like', 'tf.zeros_like', (['attr_std'], {}), '(attr_std)\n', (6262, 6272), True, 'import tensorflow as tf\n'), ((6432, 6454), 
'tensorflow.stop_gradient', 'tf.stop_gradient', (['attr'], {}), '(attr)\n', (6448, 6454), True, 'import tensorflow as tf\n'), ((6477, 6502), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['attr_kl'], {}), '(attr_kl)\n', (6493, 6502), True, 'import tensorflow as tf\n'), ((7016, 7060), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['object_logits', '(-10.0)', '(10.0)'], {}), '(object_logits, -10.0, 10.0)\n', (7032, 7060), True, 'import tensorflow as tf\n'), ((7370, 7407), 'tensorflow.ones_like', 'tf.ones_like', (['objects[:, :, :, :, :1]'], {}), '(objects[:, :, :, :, :1])\n', (7382, 7407), True, 'import tensorflow as tf\n'), ((8236, 8284), 'dps.cfg.build_object_encoder', 'cfg.build_object_encoder', ([], {'scope': '"""object_encoder"""'}), "(scope='object_encoder')\n", (8260, 8284), False, 'from dps import cfg\n'), ((8467, 8515), 'dps.cfg.build_object_decoder', 'cfg.build_object_decoder', ([], {'scope': '"""object_decoder"""'}), "(scope='object_decoder')\n", (8491, 8515), False, 'from dps import cfg\n'), ((9049, 9082), 'auto_yolo.models.core.xent_loss', 'xent_loss', ([], {'pred': 'output', 'label': 'inp'}), '(pred=output, label=inp)\n', (9058, 9082), False, 'from auto_yolo.models.core import xent_loss, AP, VariationalAutoencoder, normal_vae\n'), ((10266, 10286), 'matplotlib.colors.to_rgb', 'to_rgb', (['"""xkcd:azure"""'], {}), "('xkcd:azure')\n", (10272, 10286), False, 'from matplotlib.colors import to_rgb\n'), ((10318, 10336), 'matplotlib.colors.to_rgb', 'to_rgb', (['"""xkcd:red"""'], {}), "('xkcd:red')\n", (10324, 10336), False, 'from matplotlib.colors import to_rgb\n'), ((10424, 10461), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', 'no'], {'figsize': '(20, 20)'}), '(2, no, figsize=(20, 20))\n', (10436, 10461), True, 'import matplotlib.pyplot as plt\n'), ((11096, 11190), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.02)', 'right': '(0.98)', 'top': '(0.98)', 'bottom': '(0.02)', 'wspace': '(0.1)', 'hspace': '(0.1)'}), 
'(left=0.02, right=0.98, top=0.98, bottom=0.02, wspace=\n 0.1, hspace=0.1)\n', (11115, 11190), True, 'import matplotlib.pyplot as plt\n'), ((11527, 11541), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11536, 11541), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2400), 'tensorflow.abs', 'tf.abs', (["(self.inp - self._tensors['background'])"], {}), "(self.inp - self._tensors['background'])\n", (2360, 2400), True, 'import tensorflow as tf\n'), ((2781, 2805), 'tensorflow.ones_like', 'tf.ones_like', (['components'], {}), '(components)\n', (2793, 2805), True, 'import tensorflow as tf\n'), ((9190, 9249), 'dps.utils.tf.tf_mean_sum', 'tf_mean_sum', (["self._tensors['per_pixel_reconstruction_loss']"], {}), "(self._tensors['per_pixel_reconstruction_loss'])\n", (9201, 9249), False, 'from dps.utils.tf import RNNCell, tf_mean_sum\n'), ((9384, 9427), 'dps.utils.tf.tf_mean_sum', 'tf_mean_sum', (["(obj * self._tensors['attr_kl'])"], {}), "(obj * self._tensors['attr_kl'])\n", (9395, 9427), False, 'from dps.utils.tf import RNNCell, tf_mean_sum\n'), ((10780, 10874), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(1, 0)', '(0.2)', '(1)'], {'clip_on': '(False)', 'transform': 'ax.transAxes', 'facecolor': 'colour'}), '((1, 0), 0.2, 1, clip_on=False, transform=ax.transAxes,\n facecolor=colour)\n', (10797, 10874), True, 'import matplotlib.patches as patches\n'), ((1465, 1491), 'tensorflow.stack', 'tf.stack', (['[yt, xt, ys, xs]'], {}), '([yt, xt, ys, xs])\n', (1473, 1491), True, 'import tensorflow as tf\n'), ((3227, 3254), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['both'], {'axis': '(0)'}), '(both, axis=0)\n', (3240, 3254), True, 'import tensorflow as tf\n'), ((10481, 10495), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (10489, 10495), True, 'import numpy as np\n'), ((11623, 11644), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (11638, 11644), False, 'import os\n'), ((9576, 9615), 'tensorflow.to_int32', 'tf.to_int32', 
(["self._tensors['n_objects']"], {}), "(self._tensors['n_objects'])\n", (9587, 9615), True, 'import tensorflow as tf\n')] |
# clone.py
# Udacity Self-Driving Car Engineer
# Behavioral Cloning Project
# Script to import data saved from self-driving car simulator
# and train in a Keras neural network
import os
import csv
import cv2
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer
from keras.models import Sequential
from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D
from keras.layers import Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.models import Model
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
# Generator function to save memory
def generator(samples, steering_correction, batch_size):
    """Yield shuffled (images, steering_angles) batches indefinitely.

    Each CSV row contributes six training samples: the centre, left and
    right camera images plus a horizontally flipped copy of each.  Left and
    right angles are offset by +/- ``steering_correction``; flipping an
    image negates its angle.

    Parameters
    ----------
    samples : list
        Rows from driving_log.csv (image paths in columns 0-2, steering
        angle in column 3).
    steering_correction : float
        Angle offset applied to the left (+) and right (-) camera images.
    batch_size : int
        Number of CSV rows consumed per yielded batch (the yielded arrays
        hold 6x this many samples due to augmentation).
    """
    num_samples = len(samples)
    # Loop forever so the generator never terminates
    while True:
        sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            measurements = []
            for batch_sample in batch_samples:
                angle = float(batch_sample[3])
                # (CSV column, steering offset): centre, left, right cameras.
                for col, correction in ((0, 0.0),
                                        (1, steering_correction),
                                        (2, -steering_correction)):
                    # Read each file once; the original implementation read
                    # every image from disk twice (plain + flipped copies).
                    image = cv2.imread(batch_sample[col])
                    images.append(image)
                    measurements.append(angle + correction)
                    # Horizontal flip: mirrored image, negated steering angle.
                    images.append(np.fliplr(image))
                    measurements.append(-(angle + correction))
            # In-batch ordering differs from the original interleaving, but
            # the batch is shuffled before being yielded, so it is immaterial.
            yield sklearn.utils.shuffle(np.array(images), np.array(measurements))
lines = [] # Lines read from CSV file
steering_correction = 0.065 # Steering correction factor from left/right cameras
batch_size = 128 # Batch size for training
# Read CSV file from local machine
# Each row holds the three camera image paths (cols 0-2) and the steering
# angle (col 3), as consumed by generator() above.
# NOTE(review): assumes driving_log.csv has no header row -- confirm export format.
with open('./data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
# Split 20% of training data set for test set
train_samples, validation_samples = train_test_split(lines, test_size = 0.2)
# Run generator function on training and validation datasets
train_generator = generator(train_samples, steering_correction = steering_correction, batch_size = batch_size)
validation_generator = generator(validation_samples, steering_correction = steering_correction, batch_size = batch_size)
# Model Neural Network
# Follows CNN architecture in NVIDIA's "End to End Learning for Self-Driving Cars"
# http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
model = Sequential()
# Normalize pixel values from [0, 255] to [-0.5, 0.5]
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape = (160, 320, 3)))
# Crop 65 rows from the top and 20 from the bottom of each frame
# (presumably sky and car hood -- confirm against camera mounting)
model.add(Cropping2D(cropping = ((65, 20), (0, 0))))
model.add(Conv2D(24, kernel_size = (5, 5), strides = (2, 2), activation = 'relu'))
model.add(Conv2D(36, kernel_size = (5, 5), strides = (2, 2), activation = 'relu'))
model.add(Conv2D(48, kernel_size = (5, 5), strides = (2, 2), activation = 'relu'))
model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D())
model.add(Flatten())
# Fully connected head; the single output unit is the predicted steering angle.
model.add(Dense(500))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
# Regression problem: mean squared error on the steering angle.
model.compile(loss = 'mse', optimizer = 'adam')
history_object = model.fit_generator(train_generator,
    steps_per_epoch = np.ceil(len(train_samples)/batch_size),
    validation_data = validation_generator,
    validation_steps = np.ceil(len(validation_samples)/batch_size),
    epochs = 5,
    verbose = 1)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('Mean Squared Error Loss vs. Training Epochs')
plt.ylabel('Mean Squared Error Loss')
plt.xlabel('Epoch')
plt.legend(['Training set', 'Validation set'], loc='upper right')
plt.show()
print("\nSteering Correction: %4.3f\n" % (steering_correction))
model.summary()
model.save('model.h5')
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"csv.reader",
"matplotlib.pyplot.plot",
"keras.layers.Cropping2D",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"matplotlib.pyplot.legend",
"keras.layers.Flatten",
"keras.layers.pooling.MaxPooling2D",
"cv2.imread",
... | [((3094, 3132), 'sklearn.model_selection.train_test_split', 'train_test_split', (['lines'], {'test_size': '(0.2)'}), '(lines, test_size=0.2)\n', (3110, 3132), False, 'from sklearn.model_selection import train_test_split\n'), ((3649, 3661), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3659, 3661), False, 'from keras.models import Sequential\n'), ((4944, 4984), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (4952, 4984), True, 'import matplotlib.pyplot as plt\n'), ((4985, 5029), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (4993, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5086), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Squared Error Loss vs. Training Epochs"""'], {}), "('Mean Squared Error Loss vs. Training Epochs')\n", (5039, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5087, 5124), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Squared Error Loss"""'], {}), "('Mean Squared Error Loss')\n", (5097, 5124), True, 'import matplotlib.pyplot as plt\n'), ((5125, 5144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (5135, 5144), True, 'import matplotlib.pyplot as plt\n'), ((5145, 5210), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training set', 'Validation set']"], {'loc': '"""upper right"""'}), "(['Training set', 'Validation set'], loc='upper right')\n", (5155, 5210), True, 'import matplotlib.pyplot as plt\n'), ((5211, 5221), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5219, 5221), True, 'import matplotlib.pyplot as plt\n'), ((2940, 2959), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (2950, 2959), False, 'import csv\n'), ((3672, 3732), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', 
(3678, 3732), False, 'from keras.layers import Lambda\n'), ((3748, 3787), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((65, 20), (0, 0))'}), '(cropping=((65, 20), (0, 0)))\n', (3758, 3787), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((3801, 3866), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(24)'], {'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(24, kernel_size=(5, 5), strides=(2, 2), activation='relu')\n", (3807, 3866), False, 'from keras.layers.convolutional import Conv2D\n'), ((3884, 3949), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(36)'], {'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(36, kernel_size=(5, 5), strides=(2, 2), activation='relu')\n", (3890, 3949), False, 'from keras.layers.convolutional import Conv2D\n'), ((3967, 4032), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(48)'], {'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(48, kernel_size=(5, 5), strides=(2, 2), activation='relu')\n", (3973, 4032), False, 'from keras.layers.convolutional import Conv2D\n'), ((4050, 4099), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (4056, 4099), False, 'from keras.layers.convolutional import Conv2D\n'), ((4115, 4164), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (4121, 4164), False, 'from keras.layers.convolutional import Conv2D\n'), ((4180, 4194), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (4192, 4194), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((4206, 4215), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4213, 4215), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((4227, 4237), 
'keras.layers.Dense', 'Dense', (['(500)'], {}), '(500)\n', (4232, 4237), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((4249, 4259), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (4254, 4259), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((4271, 4280), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (4276, 4280), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((4292, 4301), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (4297, 4301), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((4313, 4321), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4318, 4321), False, 'from keras.layers import Flatten, Dense, Activation, Dropout, Cropping2D\n'), ((875, 905), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['samples'], {}), '(samples)\n', (896, 905), False, 'import sklearn\n'), ((2515, 2531), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2523, 2531), True, 'import numpy as np\n'), ((2554, 2576), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (2562, 2576), True, 'import numpy as np\n'), ((2596, 2635), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2617, 2635), False, 'import sklearn\n'), ((1437, 1464), 'cv2.imread', 'cv2.imread', (['batch_sample[0]'], {}), '(batch_sample[0])\n', (1447, 1464), False, 'import cv2\n'), ((1596, 1623), 'cv2.imread', 'cv2.imread', (['batch_sample[1]'], {}), '(batch_sample[1])\n', (1606, 1623), False, 'import cv2\n'), ((1775, 1802), 'cv2.imread', 'cv2.imread', (['batch_sample[2]'], {}), '(batch_sample[2])\n', (1785, 1802), False, 'import cv2\n'), ((1966, 1993), 'cv2.imread', 'cv2.imread', (['batch_sample[0]'], {}), '(batch_sample[0])\n', (1976, 1993), False, 'import cv2\n'), ((2140, 2167), 'cv2.imread', 'cv2.imread', (['batch_sample[1]'], {}), 
'(batch_sample[1])\n', (2150, 2167), False, 'import cv2\n'), ((2336, 2363), 'cv2.imread', 'cv2.imread', (['batch_sample[2]'], {}), '(batch_sample[2])\n', (2346, 2363), False, 'import cv2\n')] |
import numpy as np
import nibabel as nb
import os
import sys
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving, \
_check_topology_lut_dir, _check_atlas_file, _check_available_memory
def filter_ridge_structures(input_image,
                            structure_intensity='bright',
                            output_type='probability',
                            use_strict_min_max_filter=True,
                            save_data=False, overwrite=False, output_dir=None,
                            file_name=None):
    """ Filter Ridge Structures

    Uses an image filter to make a probabilistic image of ridge
    structures.

    Parameters
    ----------
    input_image: niimg
        Image containing structure-of-interest
    structure_intensity: {'bright', 'dark', 'both'}
        Image intensity of structure-of-interest
    output_type: {'probability','intensity'}
        Whether the image should be normalized to reflect probabilities
    use_strict_min_max_filter: bool, optional
        Choose between the more specific recursive ridge filter or a more
        sensitive bidirectional filter (default is True)
    save_data: bool, optional
        Save output data to file (default is False)
    overwrite: bool
        Overwrite existing results (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * result: Image that reflects the presence of ridges
          in the image (_rdg-img)

    Notes
    ----------
    Wraps the original Java ``FilterRidgeStructures`` module.
    """
    if save_data:
        output_dir = _output_dir_4saving(output_dir, input_image)

        ridge_file = os.path.join(output_dir,
                        _fname_4saving(file_name=file_name,
                                       rootfile=input_image,
                                       suffix='rdg-img', ))
        # Reuse an existing result unless the caller asked to overwrite.
        if overwrite is False \
            and os.path.isfile(ridge_file) :
                print("skip computation (use existing results)")
                output = {'result': load_volume(ridge_file)}
                return output

    # start virtual machine, if not already running
    try:
        mem = _check_available_memory()
        nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])
    except ValueError:
        # VM already running
        pass

    # create algorithm instance
    filter_ridge = nighresjava.FilterRidgeStructures()

    # set parameters
    filter_ridge.setStructureIntensity(structure_intensity)
    filter_ridge.setOutputType(output_type)
    filter_ridge.setUseStrictMinMaxFilter(use_strict_min_max_filter)

    # load the image once and set dimensions and resolution
    # (the original loaded and extracted the data array twice)
    input_image = load_volume(input_image)
    data = input_image.get_data()
    affine = input_image.affine
    header = input_image.header
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = input_image.shape

    filter_ridge.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    filter_ridge.setResolutions(resolution[0], resolution[1], resolution[2])

    # data is flattened in Fortran order to match the Java module's layout
    filter_ridge.setInputImage(nighresjava.JArray('float')(
        (data.flatten('F')).astype(float)))

    # execute
    try:
        filter_ridge.execute()
    except Exception:
        # if the Java module fails, report and re-raise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # Collect output
    ridge_structure_image_data = np.reshape(np.array(
        filter_ridge.getRidgeStructureImage(),
        dtype=np.float32), dimensions, 'F')

    # Set sensible display range in the NIfTI header.
    if output_type == 'probability':
        header['cal_min'] = 0.0
        header['cal_max'] = 1.0
    else:
        header['cal_min'] = np.nanmin(ridge_structure_image_data)
        header['cal_max'] = np.nanmax(ridge_structure_image_data)

    ridge_structure_image = nb.Nifti1Image(ridge_structure_image_data, affine,
                                       header)
    outputs = {'result': ridge_structure_image}

    if save_data:
        save_volume(ridge_file, ridge_structure_image)

    return outputs
| [
"nibabel.Nifti1Image",
"nighresjava.initVM",
"numpy.nanmin",
"os.path.isfile",
"nighresjava.JArray",
"sys.exc_info",
"nighresjava.FilterRidgeStructures",
"numpy.nanmax"
] | [((2717, 2752), 'nighresjava.FilterRidgeStructures', 'nighresjava.FilterRidgeStructures', ([], {}), '()\n', (2750, 2752), False, 'import nighresjava\n'), ((4326, 4384), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['ridge_structure_image_data', 'affine', 'header'], {}), '(ridge_structure_image_data, affine, header)\n', (4340, 4384), True, 'import nibabel as nb\n'), ((2566, 2629), 'nighresjava.initVM', 'nighresjava.initVM', ([], {'initialheap': "mem['init']", 'maxheap': "mem['max']"}), "(initialheap=mem['init'], maxheap=mem['max'])\n", (2584, 2629), False, 'import nighresjava\n'), ((4193, 4230), 'numpy.nanmin', 'np.nanmin', (['ridge_structure_image_data'], {}), '(ridge_structure_image_data)\n', (4202, 4230), True, 'import numpy as np\n'), ((4259, 4296), 'numpy.nanmax', 'np.nanmax', (['ridge_structure_image_data'], {}), '(ridge_structure_image_data)\n', (4268, 4296), True, 'import numpy as np\n'), ((2282, 2308), 'os.path.isfile', 'os.path.isfile', (['ridge_file'], {}), '(ridge_file)\n', (2296, 2308), False, 'import os\n'), ((3468, 3495), 'nighresjava.JArray', 'nighresjava.JArray', (['"""float"""'], {}), "('float')\n", (3486, 3495), False, 'import nighresjava\n'), ((3782, 3796), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3794, 3796), False, 'import sys\n')] |
from __future__ import division
import os
import sys
import logging
import torch
import numpy as np
import cv2
import torch.nn.functional as F
from thop import profile
sys.path.append("./")
from transform import ToTensor
from utils.darts_utils import create_exp_dir, plot_op, plot_path_width, objective_acc_lat
try:
from utils.darts_utils import compute_latency_ms_tensorrt as compute_latency
print("use TensorRT for latency test")
except:
from utils.darts_utils import compute_latency_ms_pytorch as compute_latency
print("use PyTorch for latency test")
# from utils.darts_utils import compute_latency_ms_pytorch as compute_latency
# print("use PyTorch for latency test")
try:
from lib.models.model_stages_trt import BiSeNet
except:
from lib.models.model_stages import BiSeNet
print("No TensorRT")
def main():
    """Single-image inference demo: load a pretrained BiSeNet/STDC
    checkpoint, segment one sample image and write the colourised
    prediction map to ./res.jpg."""
    print("begin")

    # Deterministic cuDNN / RNG setup ################
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    seed = 12345
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # Network configuration ##############
    # Only the stride-8 boundary head is enabled.
    use_boundary_2 = False
    use_boundary_4 = False
    use_boundary_8 = True
    use_boundary_16 = False
    use_conv_last = False
    n_classes = 4  # mgchen

    # STDC1Seg-50 variant (250.4 FPS on NVIDIA GTX 1080Ti).  The other
    # published variants (STDC1Seg-75, STDC2Seg-50, STDC2Seg-75) follow
    # the same pattern with a different backbone and input resolution.
    backbone = 'STDCNet813'
    methodName = 'train_STDC1-Seg-20211118'
    inputSize = 192
    inputScale = 50
    inputDimension = (1, 3, 192, 192)

    model = BiSeNet(backbone=backbone, n_classes=n_classes,
                    use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4,
                    use_boundary_8=use_boundary_8, use_boundary_16=use_boundary_16,
                    input_size=inputSize, use_conv_last=use_conv_last)

    print('loading parameters...')
    ckpt_dir = './checkpoints/{}/pths/'.format(methodName)
    weight_path = os.path.join(ckpt_dir, 'model_final.pth')
    model.load_state_dict(torch.load(weight_path))
    model.eval()
    model.cuda()

    #####################################################
    # One random RGB colour per possible label id (reproducible thanks to
    # the numpy seed above).
    palette = np.random.randint(0, 256, (256, 3), dtype=np.uint8)
    preprocess = ToTensor(
        mean=(0.3257, 0.3690, 0.3223),  # city, rgb
        std=(0.2112, 0.2148, 0.2115),
    )

    # Read BGR, flip to RGB, normalise and add the batch dimension.
    image = cv2.imread("/root/chenguang/project/STDC-Seg-master/data/shiliu/leftImg8bit/val/shiliu/20210505095657752_leftImg8bit.png")[:, :, ::-1]
    image = preprocess(dict(im=image, lb=None))['im'].unsqueeze(0).cuda()

    # Forward pass; keep only the main segmentation head.
    logits = model(image)[0]
    label_map = F.softmax(logits, 1).detach().cpu().numpy().astype('int64')
    print("out")
    print(logits.shape)
    print(label_map.shape)
    print(label_map)
    colour_map = palette[label_map]
    print("pred")
    print()
    cv2.imwrite('./res.jpg', colour_map)
    print("access!")

    # latency = compute_latency(model, inputDimension)
    # print("{}{} FPS:".format(methodName, inputScale) + str(1000./latency))
    # logging.info("{}{} FPS:".format(methodName, inputScale) + str(1000./latency))

    # calculate FLOPS and params
    '''
    model = model.cpu()
    flops, params = profile(model, inputs=(torch.randn(inputDimension),), verbose=False)
    print("params = {}MB, FLOPs = {}GB".format(params / 1e6, flops / 1e9))
    logging.info("params = {}MB, FLOPs = {}GB".format(params / 1e6, flops / 1e9))
    '''
# Script entry point: run the single-image inference demo.
if __name__ == '__main__':
    main()
| [
"sys.path.append",
"lib.models.model_stages.BiSeNet",
"numpy.random.seed",
"torch.manual_seed",
"cv2.imwrite",
"torch.load",
"torch.cuda.manual_seed",
"torch.nn.functional.softmax",
"cv2.imread",
"numpy.random.randint",
"torch.cuda.is_available",
"transform.ToTensor",
"os.path.join"
] | [((171, 192), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (186, 192), False, 'import sys\n'), ((1009, 1029), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1023, 1029), True, 'import numpy as np\n'), ((1034, 1057), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1051, 1057), False, 'import torch\n'), ((1065, 1090), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1088, 1090), False, 'import torch\n'), ((2160, 2398), 'lib.models.model_stages.BiSeNet', 'BiSeNet', ([], {'backbone': 'backbone', 'n_classes': 'n_classes', 'use_boundary_2': 'use_boundary_2', 'use_boundary_4': 'use_boundary_4', 'use_boundary_8': 'use_boundary_8', 'use_boundary_16': 'use_boundary_16', 'input_size': 'inputSize', 'use_conv_last': 'use_conv_last'}), '(backbone=backbone, n_classes=n_classes, use_boundary_2=\n use_boundary_2, use_boundary_4=use_boundary_4, use_boundary_8=\n use_boundary_8, use_boundary_16=use_boundary_16, input_size=inputSize,\n use_conv_last=use_conv_last)\n', (2167, 2398), False, 'from lib.models.model_stages import BiSeNet\n'), ((2513, 2552), 'os.path.join', 'os.path.join', (['respth', '"""model_final.pth"""'], {}), "(respth, 'model_final.pth')\n", (2525, 2552), False, 'import os\n'), ((2708, 2759), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(256, 3)'], {'dtype': 'np.uint8'}), '(0, 256, (256, 3), dtype=np.uint8)\n', (2725, 2759), True, 'import numpy as np\n'), ((2777, 2845), 'transform.ToTensor', 'ToTensor', ([], {'mean': '(0.3257, 0.369, 0.3223)', 'std': '(0.2112, 0.2148, 0.2115)'}), '(mean=(0.3257, 0.369, 0.3223), std=(0.2112, 0.2148, 0.2115))\n', (2785, 2845), False, 'from transform import ToTensor\n'), ((3402, 3432), 'cv2.imwrite', 'cv2.imwrite', (['"""./res.jpg"""', 'pred'], {}), "('./res.jpg', pred)\n", (3413, 3432), False, 'import cv2\n'), ((1100, 1128), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1122, 1128), 
False, 'import torch\n'), ((2579, 2599), 'torch.load', 'torch.load', (['save_pth'], {}), '(save_pth)\n', (2589, 2599), False, 'import torch\n'), ((2883, 3015), 'cv2.imread', 'cv2.imread', (['"""/root/chenguang/project/STDC-Seg-master/data/shiliu/leftImg8bit/val/shiliu/20210505095657752_leftImg8bit.png"""'], {}), "(\n '/root/chenguang/project/STDC-Seg-master/data/shiliu/leftImg8bit/val/shiliu/20210505095657752_leftImg8bit.png'\n )\n", (2893, 3015), False, 'import cv2\n'), ((3206, 3223), 'torch.nn.functional.softmax', 'F.softmax', (['out', '(1)'], {}), '(out, 1)\n', (3215, 3223), True, 'import torch.nn.functional as F\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import unittest
import numpy as np
import paddle
import scipy.special
import scipy.stats
from paddle.distribution import kl
import config
import mock_data as mock
import parameterize as param
# Run every KL test below in float64; the rtol/atol tolerances in `config`
# are looked up by the dtype string of the tensors created here.
paddle.set_default_dtype('float64')
@param.place(config.DEVICES)
@param.parameterize_cls((param.TEST_CASE_NAME, 'a1', 'b1', 'a2', 'b2'), [
    ('test_regular_input', 6.0 * np.random.random(
        (4, 5)) + 1e-4, 6.0 * np.random.random(
        (4, 5)) + 1e-4, 6.0 * np.random.random(
        (4, 5)) + 1e-4, 6.0 * np.random.random((4, 5)) + 1e-4),
])
class TestKLBetaBeta(unittest.TestCase):
    """KL(Beta(a1, b1) || Beta(a2, b2)) must match a scipy-based closed form."""

    def setUp(self):
        # Parameterised concentrations are strictly positive (1e-4 .. ~6),
        # as required for valid Beta distributions.
        self.p = paddle.distribution.Beta(paddle.to_tensor(self.a1),
                                          paddle.to_tensor(self.b1))
        self.q = paddle.distribution.Beta(paddle.to_tensor(self.a2),
                                          paddle.to_tensor(self.b2))

    def test_kl_divergence(self):
        # Compare paddle's dispatched KL against the scipy reference, with
        # dtype-dependent tolerances taken from `config`.
        with paddle.fluid.dygraph.guard(self.place):
            np.testing.assert_allclose(
                paddle.distribution.kl_divergence(self.p, self.q),
                self.scipy_kl_beta_beta(self.a1, self.b1, self.a2, self.b2),
                rtol=config.RTOL.get(str(self.a1.dtype)),
                atol=config.ATOL.get(str(self.a1.dtype)))

    def scipy_kl_beta_beta(self, a1, b1, a2, b2):
        # Closed-form Beta-Beta KL divergence, expressed elementwise via
        # scipy's betaln and digamma.
        return (scipy.special.betaln(a2, b2) - scipy.special.betaln(a1, b1) +
                (a1 - a2) * scipy.special.digamma(a1) +
                (b1 - b2) * scipy.special.digamma(b1) +
                (a2 - a1 + b2 - b1) * scipy.special.digamma(a1 + b1))
@param.place(config.DEVICES)
@param.param_cls((param.TEST_CASE_NAME, 'conc1', 'conc2'), [
    ('test-regular-input', np.random.random(
        (5, 7, 8, 10)), np.random.random((5, 7, 8, 10))),
])
class TestKLDirichletDirichlet(unittest.TestCase):
    """KL(Dirichlet(conc1) || Dirichlet(conc2)) must match a scipy reference."""

    def setUp(self):
        self.p = paddle.distribution.Dirichlet(paddle.to_tensor(self.conc1))
        self.q = paddle.distribution.Dirichlet(paddle.to_tensor(self.conc2))

    def test_kl_divergence(self):
        # Dtype-dependent tolerances come from `config`.
        with paddle.fluid.dygraph.guard(self.place):
            np.testing.assert_allclose(
                paddle.distribution.kl_divergence(self.p, self.q),
                self.scipy_kl_diric_diric(self.conc1, self.conc2),
                rtol=config.RTOL.get(str(self.conc1.dtype)),
                atol=config.ATOL.get(str(self.conc1.dtype)))

    def scipy_kl_diric_diric(self, conc1, conc2):
        # Closed-form Dirichlet-Dirichlet KL; the last axis is reduced
        # (the Dirichlet event dimension), leading axes act as batch dims.
        return (
            scipy.special.gammaln(np.sum(conc1, -1)) -
            scipy.special.gammaln(np.sum(conc2, -1)) - np.sum(
                scipy.special.gammaln(conc1) - scipy.special.gammaln(conc2), -1)
            + np.sum(
                (conc1 - conc2) *
                (scipy.special.digamma(conc1) -
                 scipy.special.digamma(np.sum(conc1, -1, keepdims=True))), -1))
class DummyDistribution(paddle.distribution.Distribution):
    # Distribution subclass with no registered KL rule; used to exercise
    # the kl_divergence dispatcher's NotImplementedError path.
    pass
@param.place(config.DEVICES)
@param.param_cls((param.TEST_CASE_NAME, 'p', 'q'),
                 [('test-unregister', DummyDistribution(), DummyDistribution)])
class TestDispatch(unittest.TestCase):
    """kl_divergence must raise for pairs with no registered KL rule."""

    def test_dispatch_with_unregister(self):
        # No implementation is registered for DummyDistribution, so the
        # dispatcher has nothing to fall back on.
        self.assertRaises(NotImplementedError,
                          paddle.distribution.kl_divergence, self.p, self.q)
@param.place(config.DEVICES)
@param.param_cls(
    (param.TEST_CASE_NAME, 'p', 'q'),
    [('test-diff-dist', mock.Exponential(paddle.rand((100, 200, 100)) + 1.0),
      mock.Exponential(paddle.rand((100, 200, 100)) + 2.0)),
     ('test-same-dist', mock.Exponential(
         paddle.to_tensor(1.0)), mock.Exponential(paddle.to_tensor(1.0)))])
class TestKLExpfamilyExpFamily(unittest.TestCase):
    """The generic exponential-family KL rule must agree with dispatch."""

    def test_kl_expfamily_expfamily(self):
        # Dispatched result first, explicit exp-family formula second,
        # matching the original evaluation order.
        actual = paddle.distribution.kl_divergence(self.p, self.q)
        expected = kl._kl_expfamily_expfamily(self.p, self.q)
        np.testing.assert_allclose(actual,
                                   expected,
                                   rtol=config.RTOL.get(config.DEFAULT_DTYPE),
                                   atol=config.ATOL.get(config.DEFAULT_DTYPE))
# Run the whole KL-divergence test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.sum",
"paddle.distribution.kl._kl_expfamily_expfamily",
"config.RTOL.get",
"paddle.distribution.kl_divergence",
"paddle.fluid.dygraph.guard",
"numpy.random.random",
"paddle.set_default_dtype",
"config.ATOL.get",
"paddle.rand",
"paddle.to_tensor",
"parameterize.place"
] | [((821, 856), 'paddle.set_default_dtype', 'paddle.set_default_dtype', (['"""float64"""'], {}), "('float64')\n", (845, 856), False, 'import paddle\n'), ((860, 887), 'parameterize.place', 'param.place', (['config.DEVICES'], {}), '(config.DEVICES)\n', (871, 887), True, 'import parameterize as param\n'), ((2229, 2256), 'parameterize.place', 'param.place', (['config.DEVICES'], {}), '(config.DEVICES)\n', (2240, 2256), True, 'import parameterize as param\n'), ((3559, 3586), 'parameterize.place', 'param.place', (['config.DEVICES'], {}), '(config.DEVICES)\n', (3570, 3586), True, 'import parameterize as param\n'), ((3921, 3948), 'parameterize.place', 'param.place', (['config.DEVICES'], {}), '(config.DEVICES)\n', (3932, 3948), True, 'import parameterize as param\n'), ((4726, 4741), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4739, 4741), False, 'import unittest\n'), ((1293, 1318), 'paddle.to_tensor', 'paddle.to_tensor', (['self.a1'], {}), '(self.a1)\n', (1309, 1318), False, 'import paddle\n'), ((1362, 1387), 'paddle.to_tensor', 'paddle.to_tensor', (['self.b1'], {}), '(self.b1)\n', (1378, 1387), False, 'import paddle\n'), ((1431, 1456), 'paddle.to_tensor', 'paddle.to_tensor', (['self.a2'], {}), '(self.a2)\n', (1447, 1456), False, 'import paddle\n'), ((1500, 1525), 'paddle.to_tensor', 'paddle.to_tensor', (['self.b2'], {}), '(self.b2)\n', (1516, 1525), False, 'import paddle\n'), ((1575, 1613), 'paddle.fluid.dygraph.guard', 'paddle.fluid.dygraph.guard', (['self.place'], {}), '(self.place)\n', (1601, 1613), False, 'import paddle\n'), ((2544, 2572), 'paddle.to_tensor', 'paddle.to_tensor', (['self.conc1'], {}), '(self.conc1)\n', (2560, 2572), False, 'import paddle\n'), ((2621, 2649), 'paddle.to_tensor', 'paddle.to_tensor', (['self.conc2'], {}), '(self.conc2)\n', (2637, 2649), False, 'import paddle\n'), ((2699, 2737), 'paddle.fluid.dygraph.guard', 'paddle.fluid.dygraph.guard', (['self.place'], {}), '(self.place)\n', (2725, 2737), False, 'import paddle\n'), ((2345, 2376), 
'numpy.random.random', 'np.random.random', (['(5, 7, 8, 10)'], {}), '((5, 7, 8, 10))\n', (2361, 2376), True, 'import numpy as np\n'), ((2387, 2418), 'numpy.random.random', 'np.random.random', (['(5, 7, 8, 10)'], {}), '((5, 7, 8, 10))\n', (2403, 2418), True, 'import numpy as np\n'), ((3868, 3917), 'paddle.distribution.kl_divergence', 'paddle.distribution.kl_divergence', (['self.p', 'self.q'], {}), '(self.p, self.q)\n', (3901, 3917), False, 'import paddle\n'), ((4392, 4441), 'paddle.distribution.kl_divergence', 'paddle.distribution.kl_divergence', (['self.p', 'self.q'], {}), '(self.p, self.q)\n', (4425, 4441), False, 'import paddle\n'), ((4491, 4533), 'paddle.distribution.kl._kl_expfamily_expfamily', 'kl._kl_expfamily_expfamily', (['self.p', 'self.q'], {}), '(self.p, self.q)\n', (4517, 4533), False, 'from paddle.distribution import kl\n'), ((1671, 1720), 'paddle.distribution.kl_divergence', 'paddle.distribution.kl_divergence', (['self.p', 'self.q'], {}), '(self.p, self.q)\n', (1704, 1720), False, 'import paddle\n'), ((2795, 2844), 'paddle.distribution.kl_divergence', 'paddle.distribution.kl_divergence', (['self.p', 'self.q'], {}), '(self.p, self.q)\n', (2828, 2844), False, 'import paddle\n'), ((4575, 4612), 'config.RTOL.get', 'config.RTOL.get', (['config.DEFAULT_DTYPE'], {}), '(config.DEFAULT_DTYPE)\n', (4590, 4612), False, 'import config\n'), ((4654, 4691), 'config.ATOL.get', 'config.ATOL.get', (['config.DEFAULT_DTYPE'], {}), '(config.DEFAULT_DTYPE)\n', (4669, 4691), False, 'import config\n'), ((4195, 4216), 'paddle.to_tensor', 'paddle.to_tensor', (['(1.0)'], {}), '(1.0)\n', (4211, 4216), False, 'import paddle\n'), ((4236, 4257), 'paddle.to_tensor', 'paddle.to_tensor', (['(1.0)'], {}), '(1.0)\n', (4252, 4257), False, 'import paddle\n'), ((995, 1019), 'numpy.random.random', 'np.random.random', (['(4, 5)'], {}), '((4, 5))\n', (1011, 1019), True, 'import numpy as np\n'), ((1043, 1067), 'numpy.random.random', 'np.random.random', (['(4, 5)'], {}), '((4, 5))\n', (1059, 
1067), True, 'import numpy as np\n'), ((1095, 1119), 'numpy.random.random', 'np.random.random', (['(4, 5)'], {}), '((4, 5))\n', (1111, 1119), True, 'import numpy as np\n'), ((1151, 1175), 'numpy.random.random', 'np.random.random', (['(4, 5)'], {}), '((4, 5))\n', (1167, 1175), True, 'import numpy as np\n'), ((4046, 4074), 'paddle.rand', 'paddle.rand', (['(100, 200, 100)'], {}), '((100, 200, 100))\n', (4057, 4074), False, 'import paddle\n'), ((4106, 4134), 'paddle.rand', 'paddle.rand', (['(100, 200, 100)'], {}), '((100, 200, 100))\n', (4117, 4134), False, 'import paddle\n'), ((3137, 3154), 'numpy.sum', 'np.sum', (['conc1', '(-1)'], {}), '(conc1, -1)\n', (3143, 3154), True, 'import numpy as np\n'), ((3192, 3209), 'numpy.sum', 'np.sum', (['conc2', '(-1)'], {}), '(conc2, -1)\n', (3198, 3209), True, 'import numpy as np\n'), ((3445, 3477), 'numpy.sum', 'np.sum', (['conc1', '(-1)'], {'keepdims': '(True)'}), '(conc1, -1, keepdims=True)\n', (3451, 3477), True, 'import numpy as np\n')] |
import os
import unittest
import numpy as np
from prive.report import MIAttackSummary
class MiAttackSummaryTest(unittest.TestCase):
    """Unit tests for MIAttackSummary on random labels/predictions.

    With a fixed seed and independent random labels/predictions, the
    attack should behave like random guessing: ~0 MIA advantage and
    ~1 privacy gain.
    """

    def setUp(self):
        # Fixed seed so labels/predictions (and every derived metric)
        # are reproducible across runs.
        np.random.seed(0)
        self.predictions = np.random.randint(2, size=100)
        self.labels = np.random.randint(2, size=100)
        self.attack_summary = MIAttackSummary(self.labels, self.predictions,
                                              'Random', 'Groundhog', 'Test', "1")

    def test_accuracy(self):
        # Accuracy is the fraction of matching predictions.
        self.assertEqual(self.attack_summary.accuracy,
                         np.mean(self.predictions == self.labels))

    def test_fp(self):
        # False-positive rate: fraction of true negatives predicted positive.
        negatives = np.where(self.labels == 0)[0]
        self.assertEqual(self.attack_summary.fp,
                         np.sum(self.predictions[negatives] == 1) / len(negatives))

    def test_tp(self):
        # True-positive rate: fraction of true positives predicted positive.
        positives = np.where(self.labels == 1)[0]
        self.assertEqual(self.attack_summary.tp,
                         np.sum(self.predictions[positives] == 1) / len(positives))

    def test_mia_advantage(self):
        # A random attack has (approximately) zero advantage.
        self.assertEqual(round(self.attack_summary.mia_advantage - 0, 1), 0)

    def test_privacy_gain(self):
        # ... and a privacy gain close to 1.
        self.assertEqual(round(self.attack_summary.privacy_gain - 1, 1), 0)

    def test_get_metrics(self):
        df = self.attack_summary.get_metrics()
        self.assertFalse(df.empty)

    def test_write_metrics(self):
        path_dirname = os.path.dirname(__file__)
        self.attack_summary.write_metrics(path_dirname, 10)
        file_name = os.path.join(path_dirname,
                                 'result_Test_Groundhog_Random_Target1_10.csv')
        # Fix: remove the CSV afterwards (even if the assertion fails) so
        # repeated test runs start from a clean directory; the original
        # version left the artifact behind.
        self.addCleanup(
            lambda: os.path.exists(file_name) and os.remove(file_name))
        self.assertTrue(os.path.exists(file_name))
| [
"numpy.random.seed",
"os.path.dirname",
"prive.report.MIAttackSummary",
"os.path.exists",
"numpy.random.randint",
"numpy.mean",
"numpy.where",
"os.path.join"
] | [((163, 180), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (177, 180), True, 'import numpy as np\n'), ((209, 239), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(100)'}), '(2, size=100)\n', (226, 239), True, 'import numpy as np\n'), ((264, 294), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(100)'}), '(2, size=100)\n', (281, 294), True, 'import numpy as np\n'), ((326, 412), 'prive.report.MIAttackSummary', 'MIAttackSummary', (['self.labels', 'self.predictions', '"""Random"""', '"""Groundhog"""', '"""Test"""', '"""1"""'], {}), "(self.labels, self.predictions, 'Random', 'Groundhog',\n 'Test', '1')\n", (341, 412), False, 'from prive.report import MIAttackSummary\n'), ((1302, 1327), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1317, 1327), False, 'import os\n'), ((1409, 1483), 'os.path.join', 'os.path.join', (['path_dirname', 'f"""result_Test_Groundhog_Random_Target1_10.csv"""'], {}), "(path_dirname, f'result_Test_Groundhog_Random_Target1_10.csv')\n", (1421, 1483), False, 'import os\n'), ((494, 534), 'numpy.mean', 'np.mean', (['(self.predictions == self.labels)'], {}), '(self.predictions == self.labels)\n', (501, 534), True, 'import numpy as np\n'), ((1509, 1534), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (1523, 1534), False, 'import os\n'), ((689, 715), 'numpy.where', 'np.where', (['(self.labels == 0)'], {}), '(self.labels == 0)\n', (697, 715), True, 'import numpy as np\n'), ((874, 900), 'numpy.where', 'np.where', (['(self.labels == 1)'], {}), '(self.labels == 1)\n', (882, 900), True, 'import numpy as np\n'), ((633, 659), 'numpy.where', 'np.where', (['(self.labels == 0)'], {}), '(self.labels == 0)\n', (641, 659), True, 'import numpy as np\n'), ((818, 844), 'numpy.where', 'np.where', (['(self.labels == 1)'], {}), '(self.labels == 1)\n', (826, 844), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `deconvoluted` package."""
import unittest
import numpy as np
from deconvoluted import fourier_transform, inverse_fourier_transform
from deconvoluted.conventions import conventions, Convention
from deconvoluted.transforms import determine_norm
class TestDeconvoluted(unittest.TestCase):
    """Tests for `deconvoluted` package."""

    def setUp(self):
        # 2D fixture: a plane wave sampled on a 41 x 21 grid, together
        # with its Fourier transform for round-trip checks.
        x = np.linspace(-20, 20, 41)
        y = np.linspace(-10, 10, 21)
        X, Y = np.meshgrid(x, y)
        f_xy = np.sin(0.2 * 2 * np.pi * X + 0.1 * 2 * np.pi * Y)
        self.data2d = (f_xy, x, y)
        # Test if these are really inverses.
        F_pq, p, q = fourier_transform(f_xy, x, y)
        self.ft_data2d = (F_pq, p, q)
        # 1D fixture: two sine peaks, at 50 Hz and 80 Hz.
        N = 60  # Number of sample points
        T = 1.0 / 800.0  # sample spacing
        x = np.linspace(- N * T, N * T, 2 * N + 1)  # (- 0.75 , 0.75)
        y = np.sin(50.0 * 2.0 * np.pi * x) + 0.5 * np.sin(
            80.0 * 2.0 * np.pi * x)
        self.data1d = (y, x)

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_ifft_2d_simultanious(self):
        """
        Check the 2d ifft by performing the transform for both axes at the
        same time.
        """
        f_xy, x, y = self.data2d
        F_pq, p, q = self.ft_data2d
        # Test if these are really inverses.
        f_xy_new, x_new, y_new = inverse_fourier_transform(F_pq, p, q)
        np.testing.assert_almost_equal(x, x_new)
        np.testing.assert_almost_equal(y, y_new)
        np.testing.assert_almost_equal(f_xy, f_xy_new.real)
        # Round-tripping a real signal must not introduce imaginary parts.
        np.testing.assert_almost_equal(np.zeros_like(f_xy), f_xy_new.imag)

    def test_ifft_2d_single(self):
        """
        Test the API of a single axis transform on 2d data, by transforming
        only one axis and then transforming it back.
        """
        f_xy, x, y = self.data2d
        # Test single transforms
        F_py, p_new = fourier_transform(f_xy, x, None)
        f_xy_new, x_new = inverse_fourier_transform(F_py, p_new, None)
        np.testing.assert_almost_equal(x_new, x)
        np.testing.assert_almost_equal(f_xy_new.real, f_xy)

    def test_fft_2d_two_singles(self):
        """
        Test the API of a single axis transform on 2d data by performing the
        full 2d transform in two 1d steps.
        """
        f_xy, x, y = self.data2d
        F_pq, p, q = self.ft_data2d
        # Behavior for every axis needs to be indicated.
        with self.assertRaises(TypeError):
            fourier_transform(f_xy, x)
        # Test single transforms
        F_py, p_new = fourier_transform(f_xy, x, None)
        F_pq_new, q_new = fourier_transform(F_py, None, y)
        np.testing.assert_almost_equal(p, p_new)
        np.testing.assert_almost_equal(q, q_new)
        np.testing.assert_almost_equal(F_pq_new, F_pq)

    def test_ifft_2d_two_singles(self):
        """
        Compute the 2d ifft by doing two singles.
        """
        f_xy, x, y = self.data2d
        F_pq, p, q = self.ft_data2d
        # Behavior for every axis needs to be indicated.
        with self.assertRaises(TypeError):
            inverse_fourier_transform(F_pq, p)
        # Do the inverse from two sides to see if the result matches
        F_py_f, p_new = fourier_transform(f_xy, x, None)  # Forward
        F_py_b, y_new = inverse_fourier_transform(F_pq, None, q)  # Backward
        # Compare the intermediate
        np.testing.assert_almost_equal(y, y_new)
        np.testing.assert_almost_equal(p, p_new)
        np.testing.assert_almost_equal(F_py_f, F_py_b)
        # Compare the final state, which should be the initial state
        f_xy_new, x_new = inverse_fourier_transform(F_py_b, p, None)
        np.testing.assert_almost_equal(x, x_new)
        np.testing.assert_almost_equal(y, y_new)
        np.testing.assert_almost_equal(f_xy, f_xy_new.real)
        np.testing.assert_almost_equal(np.zeros_like(f_xy), f_xy_new.imag)

    def test_conventions(self):
        """Every registered (a, b) convention must satisfy Plancherel and
        round-trip correctly."""
        y, x = self.data1d
        F_signal, k_signal = fourier_transform(y, x)
        # Test Plancherel theorem for default settings
        self.assertAlmostEqual(np.linalg.norm(y)**2,
                               np.linalg.norm(F_signal)**2)
        # Add something ridiculous purely to test our implementation.
        # Fix: undo the registration afterwards -- the original appended to
        # the module-global `conventions` list without cleaning up, so the
        # list grew on every run and leaked into other tests.
        conventions.append(Convention(a=10, b=6))
        self.addCleanup(conventions.pop)
        for convention in conventions:
            # Inner product norm, see Plancherel section here:
            # https://www.johndcook.com/blog/fourier-theorems/
            inner_norm = 1 / determine_norm(convention)**2
            F, k = fourier_transform(y, x, convention=convention)
            y_new, x_new = inverse_fourier_transform(F, k, convention=convention)
            # Test Plancherel theorem
            self.assertAlmostEqual(
                np.linalg.norm(y)**2 / np.linalg.norm(F)**2 / inner_norm, 1.0
            )
            self.assertAlmostEqual(
                np.linalg.norm(y_new)**2 / np.linalg.norm(F)**2 / inner_norm, 1.0
            )
            # Make sure we converted the frequency axis correctly
            np.testing.assert_almost_equal(k_signal, k * convention.b / (- 2 * np.pi))
            # F should be scaled properly
            norm = np.sqrt(np.abs(convention.b) / (2 * np.pi)**(1 - convention.a))
            np.testing.assert_almost_equal(F_signal, F / norm)
            # After ifft + fft, we should be back to the start
            np.testing.assert_almost_equal(x, x_new)
            np.testing.assert_almost_equal(y, y_new.real)
            np.testing.assert_almost_equal(np.zeros_like(y), y_new.imag)
| [
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.abs",
"numpy.testing.assert_almost_equal",
"deconvoluted.transforms.determine_norm",
"deconvoluted.inverse_fourier_transform",
"deconvoluted.conventions.Convention",
"numpy.sin",
"deconvoluted.fourier_transform",
"numpy.linalg.norm",
"numpy.linspace"
... | [((430, 454), 'numpy.linspace', 'np.linspace', (['(-20)', '(20)', '(41)'], {}), '(-20, 20, 41)\n', (441, 454), True, 'import numpy as np\n'), ((467, 491), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(21)'], {}), '(-10, 10, 21)\n', (478, 491), True, 'import numpy as np\n'), ((507, 524), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (518, 524), True, 'import numpy as np\n'), ((540, 589), 'numpy.sin', 'np.sin', (['(0.2 * 2 * np.pi * X + 0.1 * 2 * np.pi * Y)'], {}), '(0.2 * 2 * np.pi * X + 0.1 * 2 * np.pi * Y)\n', (546, 589), True, 'import numpy as np\n'), ((691, 720), 'deconvoluted.fourier_transform', 'fourier_transform', (['f_xy', 'x', 'y'], {}), '(f_xy, x, y)\n', (708, 720), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((883, 920), 'numpy.linspace', 'np.linspace', (['(-N * T)', '(N * T)', '(2 * N + 1)'], {}), '(-N * T, N * T, 2 * N + 1)\n', (894, 920), True, 'import numpy as np\n'), ((1487, 1524), 'deconvoluted.inverse_fourier_transform', 'inverse_fourier_transform', (['F_pq', 'p', 'q'], {}), '(F_pq, p, q)\n', (1512, 1524), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((1534, 1574), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x_new'], {}), '(x, x_new)\n', (1564, 1574), True, 'import numpy as np\n'), ((1583, 1623), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', 'y_new'], {}), '(y, y_new)\n', (1613, 1623), True, 'import numpy as np\n'), ((1632, 1683), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['f_xy', 'f_xy_new.real'], {}), '(f_xy, f_xy_new.real)\n', (1662, 1683), True, 'import numpy as np\n'), ((2037, 2069), 'deconvoluted.fourier_transform', 'fourier_transform', (['f_xy', 'x', 'None'], {}), '(f_xy, x, None)\n', (2054, 2069), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((2096, 2140), 'deconvoluted.inverse_fourier_transform', 
'inverse_fourier_transform', (['F_py', 'p_new', 'None'], {}), '(F_py, p_new, None)\n', (2121, 2140), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((2149, 2189), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x_new', 'x'], {}), '(x_new, x)\n', (2179, 2189), True, 'import numpy as np\n'), ((2198, 2249), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['f_xy_new.real', 'f_xy'], {}), '(f_xy_new.real, f_xy)\n', (2228, 2249), True, 'import numpy as np\n'), ((2699, 2731), 'deconvoluted.fourier_transform', 'fourier_transform', (['f_xy', 'x', 'None'], {}), '(f_xy, x, None)\n', (2716, 2731), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((2758, 2790), 'deconvoluted.fourier_transform', 'fourier_transform', (['F_py', 'None', 'y'], {}), '(F_py, None, y)\n', (2775, 2790), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((2800, 2840), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['p', 'p_new'], {}), '(p, p_new)\n', (2830, 2840), True, 'import numpy as np\n'), ((2849, 2889), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['q', 'q_new'], {}), '(q, q_new)\n', (2879, 2889), True, 'import numpy as np\n'), ((2898, 2944), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['F_pq_new', 'F_pq'], {}), '(F_pq_new, F_pq)\n', (2928, 2944), True, 'import numpy as np\n'), ((3388, 3420), 'deconvoluted.fourier_transform', 'fourier_transform', (['f_xy', 'x', 'None'], {}), '(f_xy, x, None)\n', (3405, 3420), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((3456, 3496), 'deconvoluted.inverse_fourier_transform', 'inverse_fourier_transform', (['F_pq', 'None', 'q'], {}), '(F_pq, None, q)\n', (3481, 3496), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((3553, 3593), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['y', 'y_new'], {}), '(y, y_new)\n', (3583, 3593), True, 'import numpy as np\n'), ((3602, 3642), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['p', 'p_new'], {}), '(p, p_new)\n', (3632, 3642), True, 'import numpy as np\n'), ((3651, 3697), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['F_py_f', 'F_py_b'], {}), '(F_py_f, F_py_b)\n', (3681, 3697), True, 'import numpy as np\n'), ((3794, 3836), 'deconvoluted.inverse_fourier_transform', 'inverse_fourier_transform', (['F_py_b', 'p', 'None'], {}), '(F_py_b, p, None)\n', (3819, 3836), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((3845, 3885), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x_new'], {}), '(x, x_new)\n', (3875, 3885), True, 'import numpy as np\n'), ((3894, 3934), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', 'y_new'], {}), '(y, y_new)\n', (3924, 3934), True, 'import numpy as np\n'), ((3943, 3994), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['f_xy', 'f_xy_new.real'], {}), '(f_xy, f_xy_new.real)\n', (3973, 3994), True, 'import numpy as np\n'), ((4159, 4182), 'deconvoluted.fourier_transform', 'fourier_transform', (['y', 'x'], {}), '(y, x)\n', (4176, 4182), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((996, 1026), 'numpy.sin', 'np.sin', (['(50.0 * 2.0 * np.pi * x)'], {}), '(50.0 * 2.0 * np.pi * x)\n', (1002, 1026), True, 'import numpy as np\n'), ((1723, 1742), 'numpy.zeros_like', 'np.zeros_like', (['f_xy'], {}), '(f_xy)\n', (1736, 1742), True, 'import numpy as np\n'), ((2616, 2642), 'deconvoluted.fourier_transform', 'fourier_transform', (['f_xy', 'x'], {}), '(f_xy, x)\n', (2633, 2642), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((3259, 3293), 'deconvoluted.inverse_fourier_transform', 'inverse_fourier_transform', 
(['F_pq', 'p'], {}), '(F_pq, p)\n', (3284, 3293), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((4034, 4053), 'numpy.zeros_like', 'np.zeros_like', (['f_xy'], {}), '(f_xy)\n', (4047, 4053), True, 'import numpy as np\n'), ((4449, 4470), 'deconvoluted.conventions.Convention', 'Convention', ([], {'a': '(10)', 'b': '(6)'}), '(a=10, b=6)\n', (4459, 4470), False, 'from deconvoluted.conventions import conventions, Convention\n'), ((4716, 4762), 'deconvoluted.fourier_transform', 'fourier_transform', (['y', 'x'], {'convention': 'convention'}), '(y, x, convention=convention)\n', (4733, 4762), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((4790, 4844), 'deconvoluted.inverse_fourier_transform', 'inverse_fourier_transform', (['F', 'k'], {'convention': 'convention'}), '(F, k, convention=convention)\n', (4815, 4844), False, 'from deconvoluted import fourier_transform, inverse_fourier_transform\n'), ((5223, 5296), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['k_signal', '(k * convention.b / (-2 * np.pi))'], {}), '(k_signal, k * convention.b / (-2 * np.pi))\n', (5253, 5296), True, 'import numpy as np\n'), ((5435, 5485), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['F_signal', '(F / norm)'], {}), '(F_signal, F / norm)\n', (5465, 5485), True, 'import numpy as np\n'), ((5562, 5602), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x_new'], {}), '(x, x_new)\n', (5592, 5602), True, 'import numpy as np\n'), ((5615, 5660), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', 'y_new.real'], {}), '(y, y_new.real)\n', (5645, 5660), True, 'import numpy as np\n'), ((1035, 1065), 'numpy.sin', 'np.sin', (['(80.0 * 2.0 * np.pi * x)'], {}), '(80.0 * 2.0 * np.pi * x)\n', (1041, 1065), True, 'import numpy as np\n'), ((4270, 4287), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (4284, 4287), True, 
'import numpy as np\n'), ((4323, 4347), 'numpy.linalg.norm', 'np.linalg.norm', (['F_signal'], {}), '(F_signal)\n', (4337, 4347), True, 'import numpy as np\n'), ((5704, 5720), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (5717, 5720), True, 'import numpy as np\n'), ((4666, 4692), 'deconvoluted.transforms.determine_norm', 'determine_norm', (['convention'], {}), '(convention)\n', (4680, 4692), False, 'from deconvoluted.transforms import determine_norm\n'), ((5367, 5387), 'numpy.abs', 'np.abs', (['convention.b'], {}), '(convention.b)\n', (5373, 5387), True, 'import numpy as np\n'), ((4936, 4953), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (4950, 4953), True, 'import numpy as np\n'), ((4959, 4976), 'numpy.linalg.norm', 'np.linalg.norm', (['F'], {}), '(F)\n', (4973, 4976), True, 'import numpy as np\n'), ((5064, 5085), 'numpy.linalg.norm', 'np.linalg.norm', (['y_new'], {}), '(y_new)\n', (5078, 5085), True, 'import numpy as np\n'), ((5091, 5108), 'numpy.linalg.norm', 'np.linalg.norm', (['F'], {}), '(F)\n', (5105, 5108), True, 'import numpy as np\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import os
import random
import json
import multiprocessing
import six
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
import sys
if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding("utf-8")
sys.path.append('..')
from args import *
import rc_model
from dataset import BRCDataset
import logging
import pickle
from utils import normalize
from utils import compute_bleu_rouge
from vocab import Vocab
def prepare_batch_input(insts, args):
    """Convert one raw mini-batch dict into the per-sample feed list.

    Args:
        insts: batch dict with keys 'raw_data', 'passage_num',
            'passage_token_ids', 'question_token_ids', 'start_id', 'end_id'.
            Passage/question id lists are flat across samples; 'passage_num'
            gives how many consecutive entries belong to each sample.
        args: parsed command-line arguments (unused here; kept so all batch
            preparation helpers share one signature).

    Returns:
        A list with one entry per sample, each of the form
        [q_ids, start_label, end_label, p_ids, q_id], or None when the
        batch bookkeeping is inconsistent.
    """
    batch_size = len(insts['raw_data'])
    inst_num = len(insts['passage_num'])
    if batch_size != inst_num:
        print("data error %d, %d" % (batch_size, inst_num))
        return None
    new_insts = []
    passage_idx = 0
    for i in range(batch_size):
        p_id = []
        p_ids = []
        q_ids = []
        q_id = []
        # Collect this sample's passages (and the repeated question ids);
        # passage_idx tracks the flat offset into the batch-level lists.
        for j in range(insts['passage_num'][i]):
            p_ids.append(insts['passage_token_ids'][passage_idx + j])
            p_id = p_id + insts['passage_token_ids'][passage_idx + j]
            q_ids.append(insts['question_token_ids'][passage_idx + j])
            q_id = q_id + insts['question_token_ids'][passage_idx + j]
        passage_idx += insts['passage_num'][i]
        p_len = len(p_id)

        def _get_label(idx, ref_len):
            # One-hot label over the concatenated passage, emitted as a
            # column vector ([[0.0], [1.0], ...]) as the network expects.
            ret = [0.0] * ref_len
            if idx >= 0 and idx < ref_len:
                ret[idx] = 1.0
            return [[x] for x in ret]

        start_label = _get_label(insts['start_id'][i], p_len)
        end_label = _get_label(insts['end_id'][i], p_len)
        new_insts.append([q_ids, start_label, end_label, p_ids, q_id])
    return new_insts
def batch_reader(batch_list, args):
    """Prepare every raw batch in batch_list for feeding, in order."""
    return [prepare_batch_input(raw_batch, args) for raw_batch in batch_list]
def read_multiple(reader, count, clip_last=True):
    """Wrap `reader` so each yielded item stacks `count` consecutive
    mini-batches (one per device).

    When the reader is exhausted with a partial stack left over, the
    remainder is dropped if `clip_last` is True; otherwise its contents
    are flattened and, when large enough, re-split into `count` parts.
    """

    def __impl__():
        pending = []
        for sample in reader():
            pending.append(sample)
            if len(pending) == count:
                yield pending
                pending = []
        if len(pending) == count:
            yield pending
        elif not clip_last:
            # Flatten the leftover batches and redistribute across devices.
            flat = []
            for sample in pending:
                flat += sample
            if len(flat) > count:
                per_part = len(flat) // count
                yield [flat[per_part * k:per_part * (k + 1)]
                       for k in range(count)]

    return __impl__
def LodTensor_Array(lod_tensor):
    """Split a LoD tensor into a list of per-sequence numpy arrays.

    The first LoD level holds the boundaries of each sequence inside the
    flat data, so slice [lod[0][i], lod[0][i + 1]) is one sequence.
    """
    boundaries = lod_tensor.lod()[0]
    data = np.array(lod_tensor)
    return [data[begin:end]
            for begin, end in zip(boundaries[:-1], boundaries[1:])]
def print_para(train_prog, train_exe, logger, args):
    """Log mean/max/min/shape/size statistics for every parameter in
    `train_prog`, plus the total parameter count.

    Does nothing unless args.para_print is truthy.
    """
    if not args.para_print:
        return
    total = 0
    for param in train_prog.block(0).all_parameters():
        values = np.array(train_exe.scope.find_var(param.name).get_tensor())
        size = np.prod(values.shape)
        total = total + size
        logger.info(
            "param: {0}, mean={1} max={2} min={3} num={4} {5}".format(
                param.name,
                values.mean(),
                values.max(), values.min(), values.shape, size))
    logger.info("total param num: {0}".format(total))
def find_best_answer_for_passage(start_probs, end_probs, passage_len,
                                 max_a_len=None):
    """
    Finds the best answer with the maximum start_prob * end_prob from a
    single passage.

    Args:
        start_probs: per-position probability of the answer starting there.
        end_probs: per-position probability of the answer ending there.
        passage_len: effective passage length; None means use all of
            start_probs, otherwise the passage is truncated to this length.
        max_a_len: maximum answer length in tokens. Defaults to the
            module-level ``args.max_a_len``, preserving the original
            behavior for existing callers.

    Returns:
        ((best_start, best_end), max_prob). Indices are (-1, -1) and the
        probability is 0 when no span has positive probability.
    """
    if max_a_len is None:
        # Backward-compatible fallback to the global CLI arguments.
        max_a_len = args.max_a_len
    if passage_len is None:
        passage_len = len(start_probs)
    else:
        passage_len = min(len(start_probs), passage_len)
    best_start, best_end, max_prob = -1, -1, 0
    for start_idx in range(passage_len):
        for ans_len in range(max_a_len):
            end_idx = start_idx + ans_len
            if end_idx >= passage_len:
                # end_idx only grows with ans_len, so no later span fits.
                break
            prob = start_probs[start_idx] * end_probs[end_idx]
            if prob > max_prob:
                best_start = start_idx
                best_end = end_idx
                max_prob = prob
    return (best_start, best_end), max_prob
def find_best_answer_for_inst(sample, start_prob, end_prob, inst_lod):
    """
    Finds the best answer for a sample given start_prob and end_prob for
    each position. Calls find_best_answer_for_passage for each of the
    sample's passages and keeps the highest-scoring span.

    Args:
        sample: raw sample dict holding the candidate 'passages'.
        start_prob / end_prob: flat per-position probabilities covering all
            passages of this sample, concatenated.
        inst_lod: LoD offsets delimiting each passage inside the flat arrays.

    Returns:
        (best_answer, best_span): answer string (possibly empty) and the
        (start, end) token span inside the chosen passage; best_span is
        None when no span was found.
    """
    best_p_idx, best_span, best_score = None, None, 0
    for p_idx, passage in enumerate(sample['passages']):
        if p_idx >= args.max_p_num:
            continue
        if len(start_prob) != len(end_prob):
            logger.info('error: {}'.format(sample['question']))
            continue
        # Offsets of this passage's probabilities in the flat arrays.
        passage_start = inst_lod[p_idx] - inst_lod[0]
        passage_end = inst_lod[p_idx + 1] - inst_lod[0]
        # (The original also computed passage_end - passage_start here and
        # immediately overwrote it; that dead assignment is removed.)
        passage_len = min(args.max_p_len, len(passage['passage_tokens']))
        answer_span, score = find_best_answer_for_passage(
            start_prob[passage_start:passage_end],
            end_prob[passage_start:passage_end], passage_len)
        if score > best_score:
            best_score = score
            best_p_idx = p_idx
            best_span = answer_span
    if best_p_idx is None or best_span is None:
        best_answer = ''
    else:
        best_answer = ''.join(sample['passages'][best_p_idx]['passage_tokens'][
            best_span[0]:best_span[1] + 1])
    return best_answer, best_span
def validation(inference_program, avg_cost, s_probs, e_probs, match, feed_order,
               place, dev_count, vocab, brc_data, logger, args):
    """
    Run one full pass over the dev set: compute the average loss, extract
    the best answer span for every sample, optionally dump predictions to
    args.result_dir, and score BLEU/ROUGE against reference answers when
    they are present.

    Returns:
        (ave_loss, bleu_rouge): average dev loss and a metric dict, or
        None for bleu_rouge when no reference answers were available.
    """
    parallel_executor = fluid.ParallelExecutor(
        main_program=inference_program,
        use_cuda=bool(args.use_gpu),
        loss_name=avg_cost.name)
    print_para(inference_program, parallel_executor, logger, args)
    # Use test set as validation each pass
    total_loss = 0.0
    count = 0
    n_batch_cnt = 0
    n_batch_loss = 0.0
    pred_answers, ref_answers = [], []
    val_feed_list = [
        inference_program.global_block().var(var_name)
        for var_name in feed_order
    ]
    val_feeder = fluid.DataFeeder(val_feed_list, place)
    pad_id = vocab.get_id(vocab.pad_token)
    # read_multiple stacks dev_count mini-batches so each device gets one.
    dev_reader = lambda:brc_data.gen_mini_batches('dev', args.batch_size, pad_id, shuffle=False)
    dev_reader = read_multiple(dev_reader, dev_count)
    for batch_id, batch_list in enumerate(dev_reader(), 1):
        feed_data = batch_reader(batch_list, args)
        val_fetch_outs = parallel_executor.run(
            feed=list(val_feeder.feed_parallel(feed_data, dev_count)),
            fetch_list=[avg_cost.name, s_probs.name, e_probs.name, match.name],
            return_numpy=False)
        total_loss += np.array(val_fetch_outs[0]).sum()
        # Per-sequence start/end probabilities, split along the LoD.
        start_probs_m = LodTensor_Array(val_fetch_outs[1])
        end_probs_m = LodTensor_Array(val_fetch_outs[2])
        match_lod = val_fetch_outs[3].lod()
        count += len(np.array(val_fetch_outs[0]))
        n_batch_cnt += len(np.array(val_fetch_outs[0]))
        n_batch_loss += np.array(val_fetch_outs[0]).sum()
        log_every_n_batch = args.log_interval
        if log_every_n_batch > 0 and batch_id % log_every_n_batch == 0:
            logger.info('Average dev loss from batch {} to {} is {}'.format(
                batch_id - log_every_n_batch + 1, batch_id, "%.10f" % (
                    n_batch_loss / n_batch_cnt)))
            n_batch_loss = 0.0
            n_batch_cnt = 0
        for idx, batch in enumerate(batch_list):
            #one batch
            batch_size = len(batch['raw_data'])
            # LoD level 0 delimits samples; level 1 delimits the passages
            # inside each sample.
            batch_range = match_lod[0][idx * batch_size:(idx + 1) * batch_size +
                                       1]
            batch_lod = [[batch_range[x], batch_range[x + 1]]
                         for x in range(len(batch_range[:-1]))]
            start_prob_batch = start_probs_m[idx * batch_size:(idx + 1) *
                                             batch_size]
            end_prob_batch = end_probs_m[idx * batch_size:(idx + 1) *
                                         batch_size]
            for sample, start_prob_inst, end_prob_inst, inst_range in zip(
                    batch['raw_data'], start_prob_batch, end_prob_batch,
                    batch_lod):
                #one instance
                inst_lod = match_lod[1][inst_range[0]:inst_range[1] + 1]
                best_answer, best_span = find_best_answer_for_inst(
                    sample, start_prob_inst, end_prob_inst, inst_lod)
                pred = {
                    'question_id': sample['question_id'],
                    'question_type': sample['question_type'],
                    'answers': [best_answer],
                    'entity_answers': [[]],
                    'yesno_answers': [best_span]
                }
                pred_answers.append(pred)
                if 'answers' in sample:
                    ref = {
                        'question_id': sample['question_id'],
                        'question_type': sample['question_type'],
                        'answers': sample['answers'],
                        'entity_answers': [[]],
                        'yesno_answers': []
                    }
                    ref_answers.append(ref)
    result_dir = args.result_dir
    result_prefix = args.result_name
    if result_dir is not None and result_prefix is not None:
        if not os.path.exists(args.result_dir):
            os.makedirs(args.result_dir)
        # NOTE(review): filename is result_prefix + 'json' with no dot —
        # probably intended to be '.json'; confirm before changing.
        result_file = os.path.join(result_dir, result_prefix + 'json')
        with open(result_file, 'w') as fout:
            for pred_answer in pred_answers:
                fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')
        logger.info('Saving {} results to {}'.format(result_prefix,
                                                     result_file))
    ave_loss = 1.0 * total_loss / count
    # compute the bleu and rouge scores if reference answers is provided
    if len(ref_answers) > 0:
        pred_dict, ref_dict = {}, {}
        for pred, ref in zip(pred_answers, ref_answers):
            question_id = ref['question_id']
            if len(ref['answers']) > 0:
                pred_dict[question_id] = normalize(pred['answers'])
                ref_dict[question_id] = normalize(ref['answers'])
        bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
    else:
        bleu_rouge = None
    return ave_loss, bleu_rouge
def train(logger, args):
    """
    Train the reading-comprehension model: loads the vocab and dataset,
    builds the fluid program and optimizer, optionally restores parameters
    from args.load_dir, then trains for args.pass_num epochs, evaluating
    on the dev set and checkpointing according to the CLI intervals.
    """
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        # The vocab was pickled under Python 2; Python 3 needs the
        # bytes encoding to unpickle it.
        if six.PY2:
            vocab = pickle.load(fin)
        else:
            vocab = pickle.load(fin, encoding='bytes')
    logger.info('vocab size is {} and embed dim is {}'.format(vocab.size(
    ), vocab.embed_dim))
    brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
                          args.trainset, args.devset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')
    if not args.use_gpu:
        place = fluid.CPUPlace()
        # NOTE(review): `multiprocessing` is used here but not imported at
        # the top of this file — this branch raises NameError as written.
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    else:
        place = fluid.CUDAPlace(0)
        dev_count = fluid.core.get_cuda_device_count()
    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    main_program.random_seed = args.random_seed
    startup_prog.random_seed = args.random_seed
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)
            # clone from default main program and use it as the validation program
            inference_program = main_program.clone(for_test=True)
            # build optimizer
            if args.optim == 'sgd':
                optimizer = fluid.optimizer.SGD(
                    learning_rate=args.learning_rate,
                    regularization=fluid.regularizer.L2DecayRegularizer(
                        regularization_coeff=args.weight_decay))
            elif args.optim == 'adam':
                optimizer = fluid.optimizer.Adam(
                    learning_rate=args.learning_rate,
                    regularization=fluid.regularizer.L2DecayRegularizer(
                        regularization_coeff=args.weight_decay))
            elif args.optim == 'rprop':
                optimizer = fluid.optimizer.RMSPropOptimizer(
                    learning_rate=args.learning_rate,
                    regularization=fluid.regularizer.L2DecayRegularizer(
                        regularization_coeff=args.weight_decay))
            else:
                logger.error('Unsupported optimizer: {}'.format(args.optim))
                exit(-1)
            optimizer.minimize(avg_cost)
            # initialize parameters
            place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
            exe = Executor(place)
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                # Fresh start: run the startup program, then overwrite the
                # embedding table with the vocab's (randomly initialized)
                # embeddings.
                exe.run(startup_prog)
                embedding_para = fluid.global_scope().find_var(
                    'embedding_para').get_tensor()
                embedding_para.set(vocab.embeddings.astype(np.float32), place)
            # prepare data
            feed_list = [
                main_program.global_block().var(var_name)
                for var_name in feed_order
            ]
            feeder = fluid.DataFeeder(feed_list, place)
            logger.info('Training the model...')
            parallel_executor = fluid.ParallelExecutor(
                main_program=main_program,
                use_cuda=bool(args.use_gpu),
                loss_name=avg_cost.name)
            print_para(main_program, parallel_executor, logger, args)
            for pass_id in range(1, args.pass_num + 1):
                pass_start_time = time.time()
                pad_id = vocab.get_id(vocab.pad_token)
                # read_multiple stacks dev_count mini-batches per step so
                # every device receives one batch.
                train_reader = lambda:brc_data.gen_mini_batches('train', args.batch_size, pad_id, shuffle=False)
                train_reader = read_multiple(train_reader, dev_count)
                log_every_n_batch, n_batch_loss = args.log_interval, 0
                total_num, total_loss = 0, 0
                for batch_id, batch_list in enumerate(train_reader(), 1):
                    feed_data = batch_reader(batch_list, args)
                    fetch_outs = parallel_executor.run(
                        feed=list(feeder.feed_parallel(feed_data, dev_count)),
                        fetch_list=[avg_cost.name],
                        return_numpy=False)
                    cost_train = np.array(fetch_outs[0]).mean()
                    total_num += args.batch_size * dev_count
                    n_batch_loss += cost_train
                    total_loss += cost_train * args.batch_size * dev_count
                    if log_every_n_batch > 0 and batch_id % log_every_n_batch == 0:
                        print_para(main_program, parallel_executor, logger,
                                   args)
                        logger.info(
                            'Average loss from batch {} to {} is {}'.format(
                                batch_id - log_every_n_batch + 1, batch_id,
                                "%.10f" % (n_batch_loss / log_every_n_batch)))
                        n_batch_loss = 0
                    if args.dev_interval > 0 and batch_id % args.dev_interval == 0:
                        if brc_data.dev_set is not None:
                            eval_loss, bleu_rouge = validation(
                                inference_program, avg_cost, s_probs, e_probs,
                                match, feed_order, place, dev_count, vocab,
                                brc_data, logger, args)
                            logger.info('Dev eval loss {}'.format(eval_loss))
                            logger.info('Dev eval result: {}'.format(
                                bleu_rouge))
                pass_end_time = time.time()
                logger.info('Evaluating the model after epoch {}'.format(
                    pass_id))
                if brc_data.dev_set is not None:
                    eval_loss, bleu_rouge = validation(
                        inference_program, avg_cost, s_probs, e_probs, match,
                        feed_order, place, dev_count, vocab, brc_data, logger,
                        args)
                    logger.info('Dev eval loss {}'.format(eval_loss))
                    logger.info('Dev eval result: {}'.format(bleu_rouge))
                else:
                    logger.warning(
                        'No dev set is loaded for evaluation in the dataset!')
                # Computed but never logged in the original code.
                time_consumed = pass_end_time - pass_start_time
                logger.info('Average train loss for epoch {} is {}'.format(
                    pass_id, "%.10f" % (1.0 * total_loss / total_num)))
                if pass_id % args.save_interval == 0:
                    model_path = os.path.join(args.save_dir, str(pass_id))
                    if not os.path.isdir(model_path):
                        os.makedirs(model_path)
                    fluid.io.save_persistables(
                        executor=exe,
                        dirname=model_path,
                        main_program=main_program)
def evaluate(logger, args):
    """
    Evaluate a trained model on the dev set: restores parameters from
    args.load_dir and logs the dev loss and BLEU/ROUGE scores.
    """
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)
    logger.info('vocab size is {} and embed dim is {}'.format(vocab.size(
    ), vocab.embed_dim))
    brc_data = BRCDataset(
        args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.devset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')
    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    main_program.random_seed = args.random_seed
    startup_prog.random_seed = args.random_seed
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)
            # initialize parameters
            if not args.use_gpu:
                place = fluid.CPUPlace()
                dev_count = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            else:
                place = fluid.CUDAPlace(0)
                dev_count = fluid.core.get_cuda_device_count()
            exe = Executor(place)
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                logger.error('No model file to load ...')
                return
            inference_program = main_program.clone(for_test=True)
            # BUG FIX: the original call omitted the `match` argument,
            # shifting every later positional argument of validation() by
            # one (feed_order landed in the match slot, place in
            # feed_order, etc.). Pass match as in train()/predict().
            eval_loss, bleu_rouge = validation(
                inference_program, avg_cost, s_probs, e_probs, match,
                feed_order, place, dev_count, vocab, brc_data, logger, args)
            logger.info('Dev eval loss {}'.format(eval_loss))
            logger.info('Dev eval result: {}'.format(bleu_rouge))
            logger.info('Predicted answers are saved to {}'.format(
                os.path.join(args.result_dir)))
def predict(logger, args):
    """
    Run inference on the test set: restores parameters from args.load_dir
    and runs validation() over args.testset. The test set typically has no
    reference answers, so the value of this call is the prediction file
    that validation() writes to args.result_dir.
    """
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)
    logger.info('vocab size is {} and embed dim is {}'.format(vocab.size(
    ), vocab.embed_dim))
    brc_data = BRCDataset(
        args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.testset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')
    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    main_program.random_seed = args.random_seed
    startup_prog.random_seed = args.random_seed
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)
            # initialize parameters
            if not args.use_gpu:
                place = fluid.CPUPlace()
                # NOTE(review): `multiprocessing` is not imported at the
                # top of this file — this branch raises NameError as
                # written.
                dev_count = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            else:
                place = fluid.CUDAPlace(0)
                dev_count = fluid.core.get_cuda_device_count()
            exe = Executor(place)
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                logger.error('No model file to load ...')
                return
            inference_program = main_program.clone(for_test=True)
            # Metrics are discarded here; the side effect (result file) is
            # what matters for prediction.
            eval_loss, bleu_rouge = validation(
                inference_program, avg_cost, s_probs, e_probs, match,
                feed_order, place, dev_count, vocab, brc_data, logger, args)
def prepare(logger, args):
    """
    checks data, creates the directories, prepare the vocabulary and embeddings
    """
    logger.info('Checking the data files...')
    for path in args.trainset + args.devset + args.testset:
        assert os.path.exists(path), '{} file does not exist.'.format(path)
    logger.info('Preparing the directories...')
    for target_dir in (args.vocab_dir, args.save_dir, args.result_dir):
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
    logger.info('Building vocabulary...')
    dataset = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
                         args.trainset, args.devset, args.testset)
    vocab = Vocab(lower=True)
    for token in dataset.word_iter('train'):
        vocab.add(token)
    # Drop rare tokens (count < 2) and report how many were removed.
    size_before_filter = vocab.size()
    vocab.filter_tokens_by_cnt(min_cnt=2)
    logger.info('After filter {} tokens, the final vocab size is {}'.format(
        size_before_filter - vocab.size(), vocab.size()))
    logger.info('Assigning embeddings...')
    vocab.randomly_init_embeddings(args.embed_size)
    logger.info('Saving vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'wb') as fout:
        pickle.dump(vocab, fout)
    logger.info('Done with preparing!')
if __name__ == '__main__':
    # Parse CLI arguments once (the original parsed them a second time
    # after setting up logging, which was redundant).
    args = parse_args()
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    # Route log records to a file when --log_path is given, else to stderr.
    logger = logging.getLogger("brc")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if args.log_path:
        file_handler = logging.FileHandler(args.log_path)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    logger.info('Running with args : {}'.format(args))
    # Each phase is independent; several may run in one invocation.
    if args.prepare:
        prepare(logger, args)
    if args.train:
        train(logger, args)
    if args.evaluate:
        evaluate(logger, args)
    if args.predict:
        predict(logger, args)
| [
"pickle.dump",
"numpy.random.seed",
"paddle.fluid.regularizer.L2DecayRegularizer",
"paddle.fluid.program_guard",
"paddle.fluid.core.CPUPlace",
"json.dumps",
"logging.Formatter",
"pickle.load",
"paddle.fluid.executor.Executor",
"os.path.join",
"numpy.prod",
"sys.path.append",
"logging.FileHan... | [((1053, 1074), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (1068, 1074), False, 'import sys\n'), ((1021, 1052), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (1043, 1052), False, 'import sys\n'), ((3518, 3538), 'numpy.array', 'np.array', (['lod_tensor'], {}), '(lod_tensor)\n', (3526, 3538), True, 'import numpy as np\n'), ((7232, 7270), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', (['val_feed_list', 'place'], {}), '(val_feed_list, place)\n', (7248, 7270), True, 'import paddle.fluid as fluid\n'), ((11936, 12026), 'dataset.BRCDataset', 'BRCDataset', (['args.max_p_num', 'args.max_p_len', 'args.max_q_len', 'args.trainset', 'args.devset'], {}), '(args.max_p_num, args.max_p_len, args.max_q_len, args.trainset,\n args.devset)\n', (11946, 12026), False, 'from dataset import BRCDataset\n'), ((12451, 12466), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (12464, 12466), True, 'import paddle.fluid as fluid\n'), ((12486, 12501), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (12499, 12501), True, 'import paddle.fluid as fluid\n'), ((18993, 19079), 'dataset.BRCDataset', 'BRCDataset', (['args.max_p_num', 'args.max_p_len', 'args.max_q_len'], {'dev_files': 'args.devset'}), '(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.\n devset)\n', (19003, 19079), False, 'from dataset import BRCDataset\n'), ((19247, 19262), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (19260, 19262), True, 'import paddle.fluid as fluid\n'), ((19282, 19297), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (19295, 19297), True, 'import paddle.fluid as fluid\n'), ((21103, 21190), 'dataset.BRCDataset', 'BRCDataset', (['args.max_p_num', 'args.max_p_len', 'args.max_q_len'], {'dev_files': 'args.testset'}), '(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.\n testset)\n', (21113, 21190), False, 'from dataset import BRCDataset\n'), ((21358, 21373), 
'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (21371, 21373), True, 'import paddle.fluid as fluid\n'), ((21393, 21408), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (21406, 21408), True, 'import paddle.fluid as fluid\n'), ((23261, 23365), 'dataset.BRCDataset', 'BRCDataset', (['args.max_p_num', 'args.max_p_len', 'args.max_q_len', 'args.trainset', 'args.devset', 'args.testset'], {}), '(args.max_p_num, args.max_p_len, args.max_q_len, args.trainset,\n args.devset, args.testset)\n', (23271, 23365), False, 'from dataset import BRCDataset\n'), ((23400, 23417), 'vocab.Vocab', 'Vocab', ([], {'lower': '(True)'}), '(lower=True)\n', (23405, 23417), False, 'from vocab import Vocab\n'), ((24078, 24107), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (24089, 24107), False, 'import random\n'), ((24112, 24144), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (24126, 24144), True, 'import numpy as np\n'), ((24159, 24183), 'logging.getLogger', 'logging.getLogger', (['"""brc"""'], {}), "('brc')\n", (24176, 24183), False, 'import logging\n'), ((24234, 24307), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (24251, 24307), False, 'import logging\n'), ((10605, 10653), 'os.path.join', 'os.path.join', (['result_dir', "(result_prefix + 'json')"], {}), "(result_dir, result_prefix + 'json')\n", (10617, 10653), False, 'import os\n'), ((11435, 11474), 'utils.compute_bleu_rouge', 'compute_bleu_rouge', (['pred_dict', 'ref_dict'], {}), '(pred_dict, ref_dict)\n', (11453, 11474), False, 'from utils import compute_bleu_rouge\n'), ((12216, 12232), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (12230, 12232), True, 'import paddle.fluid as fluid\n'), ((12339, 12357), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (12354, 12357), True, 
'import paddle.fluid as fluid\n'), ((12378, 12412), 'paddle.fluid.core.get_cuda_device_count', 'fluid.core.get_cuda_device_count', ([], {}), '()\n', (12410, 12412), True, 'import paddle.fluid as fluid\n'), ((12607, 12654), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program', 'startup_prog'], {}), '(main_program, startup_prog)\n', (12626, 12654), True, 'import paddle.fluid as fluid\n'), ((18854, 18870), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (18865, 18870), False, 'import pickle\n'), ((19403, 19450), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program', 'startup_prog'], {}), '(main_program, startup_prog)\n', (19422, 19450), True, 'import paddle.fluid as fluid\n'), ((20964, 20980), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (20975, 20980), False, 'import pickle\n'), ((21514, 21561), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program', 'startup_prog'], {}), '(main_program, startup_prog)\n', (21533, 21561), True, 'import paddle.fluid as fluid\n'), ((22926, 22951), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (22940, 22951), False, 'import os\n'), ((23954, 23978), 'pickle.dump', 'pickle.dump', (['vocab', 'fout'], {}), '(vocab, fout)\n', (23965, 23978), False, 'import pickle\n'), ((24362, 24396), 'logging.FileHandler', 'logging.FileHandler', (['args.log_path'], {}), '(args.log_path)\n', (24381, 24396), False, 'import logging\n'), ((24562, 24585), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (24583, 24585), False, 'import logging\n'), ((4026, 4048), 'numpy.prod', 'np.prod', (['p_array.shape'], {}), '(p_array.shape)\n', (4033, 4048), True, 'import numpy as np\n'), ((8045, 8072), 'numpy.array', 'np.array', (['val_fetch_outs[0]'], {}), '(val_fetch_outs[0])\n', (8053, 8072), True, 'import numpy as np\n'), ((8102, 8129), 'numpy.array', 'np.array', (['val_fetch_outs[0]'], {}), '(val_fetch_outs[0])\n', (8110, 8129), True, 'import numpy as np\n'), 
((10509, 10540), 'os.path.exists', 'os.path.exists', (['args.result_dir'], {}), '(args.result_dir)\n', (10523, 10540), False, 'import os\n'), ((10554, 10582), 'os.makedirs', 'os.makedirs', (['args.result_dir'], {}), '(args.result_dir)\n', (10565, 10582), False, 'import os\n'), ((11630, 11672), 'os.path.join', 'os.path.join', (['args.vocab_dir', '"""vocab.data"""'], {}), "(args.vocab_dir, 'vocab.data')\n", (11642, 11672), False, 'import os\n'), ((11728, 11744), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (11739, 11744), False, 'import pickle\n'), ((11779, 11813), 'pickle.load', 'pickle.load', (['fin'], {'encoding': '"""bytes"""'}), "(fin, encoding='bytes')\n", (11790, 11813), False, 'import pickle\n'), ((12669, 12694), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (12692, 12694), True, 'import paddle.fluid as fluid\n'), ((12756, 12804), 'rc_model.rc_model', 'rc_model.rc_model', (['args.hidden_size', 'vocab', 'args'], {}), '(args.hidden_size, vocab, args)\n', (12773, 12804), False, 'import rc_model\n'), ((14146, 14161), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (14154, 14161), False, 'from paddle.fluid.executor import Executor\n'), ((14809, 14843), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', (['feed_list', 'place'], {}), '(feed_list, place)\n', (14825, 14843), True, 'import paddle.fluid as fluid\n'), ((18780, 18822), 'os.path.join', 'os.path.join', (['args.vocab_dir', '"""vocab.data"""'], {}), "(args.vocab_dir, 'vocab.data')\n", (18792, 18822), False, 'import os\n'), ((19465, 19490), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (19488, 19490), True, 'import paddle.fluid as fluid\n'), ((19552, 19600), 'rc_model.rc_model', 'rc_model.rc_model', (['args.hidden_size', 'vocab', 'args'], {}), '(args.hidden_size, vocab, args)\n', (19569, 19600), False, 'import rc_model\n'), ((19980, 19995), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), 
'(place)\n', (19988, 19995), False, 'from paddle.fluid.executor import Executor\n'), ((20890, 20932), 'os.path.join', 'os.path.join', (['args.vocab_dir', '"""vocab.data"""'], {}), "(args.vocab_dir, 'vocab.data')\n", (20902, 20932), False, 'import os\n'), ((21576, 21601), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (21599, 21601), True, 'import paddle.fluid as fluid\n'), ((21663, 21711), 'rc_model.rc_model', 'rc_model.rc_model', (['args.hidden_size', 'vocab', 'args'], {}), '(args.hidden_size, vocab, args)\n', (21680, 21711), False, 'import rc_model\n'), ((22091, 22106), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (22099, 22106), False, 'from paddle.fluid.executor import Executor\n'), ((23143, 23167), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (23157, 23167), False, 'import os\n'), ((23181, 23202), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (23192, 23202), False, 'import os\n'), ((23887, 23929), 'os.path.join', 'os.path.join', (['args.vocab_dir', '"""vocab.data"""'], {}), "(args.vocab_dir, 'vocab.data')\n", (23899, 23929), False, 'import os\n'), ((7830, 7857), 'numpy.array', 'np.array', (['val_fetch_outs[0]'], {}), '(val_fetch_outs[0])\n', (7838, 7857), True, 'import numpy as np\n'), ((8155, 8182), 'numpy.array', 'np.array', (['val_fetch_outs[0]'], {}), '(val_fetch_outs[0])\n', (8163, 8182), True, 'import numpy as np\n'), ((11321, 11347), 'utils.normalize', 'normalize', (["pred['answers']"], {}), "(pred['answers'])\n", (11330, 11347), False, 'from utils import normalize\n'), ((11388, 11413), 'utils.normalize', 'normalize', (["ref['answers']"], {}), "(ref['answers'])\n", (11397, 11413), False, 'from utils import normalize\n'), ((14073, 14090), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (14087, 14090), True, 'import paddle.fluid.core as core\n'), ((14112, 14127), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), 
'()\n', (14125, 14127), True, 'import paddle.fluid.core as core\n'), ((14274, 14347), 'paddle.fluid.io.load_persistables', 'fluid.io.load_persistables', (['exe', 'args.load_dir'], {'main_program': 'main_program'}), '(exe, args.load_dir, main_program=main_program)\n', (14300, 14347), True, 'import paddle.fluid as fluid\n'), ((15240, 15251), 'time.time', 'time.time', ([], {}), '()\n', (15249, 15251), False, 'import time\n'), ((17374, 17385), 'time.time', 'time.time', ([], {}), '()\n', (17383, 17385), False, 'import time\n'), ((19711, 19727), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (19725, 19727), True, 'import paddle.fluid as fluid\n'), ((19879, 19897), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (19894, 19897), True, 'import paddle.fluid as fluid\n'), ((19926, 19960), 'paddle.fluid.core.get_cuda_device_count', 'fluid.core.get_cuda_device_count', ([], {}), '()\n', (19958, 19960), True, 'import paddle.fluid as fluid\n'), ((20108, 20181), 'paddle.fluid.io.load_persistables', 'fluid.io.load_persistables', (['exe', 'args.load_dir'], {'main_program': 'main_program'}), '(exe, args.load_dir, main_program=main_program)\n', (20134, 20181), True, 'import paddle.fluid as fluid\n'), ((21822, 21838), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (21836, 21838), True, 'import paddle.fluid as fluid\n'), ((21990, 22008), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (22005, 22008), True, 'import paddle.fluid as fluid\n'), ((22037, 22071), 'paddle.fluid.core.get_cuda_device_count', 'fluid.core.get_cuda_device_count', ([], {}), '()\n', (22069, 22071), True, 'import paddle.fluid as fluid\n'), ((22219, 22292), 'paddle.fluid.io.load_persistables', 'fluid.io.load_persistables', (['exe', 'args.load_dir'], {'main_program': 'main_program'}), '(exe, args.load_dir, main_program=main_program)\n', (22245, 22292), True, 'import paddle.fluid as fluid\n'), ((18529, 18621), 'paddle.fluid.io.save_persistables', 
'fluid.io.save_persistables', ([], {'executor': 'exe', 'dirname': 'model_path', 'main_program': 'main_program'}), '(executor=exe, dirname=model_path, main_program=\n main_program)\n', (18555, 18621), True, 'import paddle.fluid as fluid\n'), ((20769, 20798), 'os.path.join', 'os.path.join', (['args.result_dir'], {}), '(args.result_dir)\n', (20781, 20798), False, 'import os\n'), ((10771, 10814), 'json.dumps', 'json.dumps', (['pred_answer'], {'ensure_ascii': '(False)'}), '(pred_answer, ensure_ascii=False)\n', (10781, 10814), False, 'import json\n'), ((13176, 13252), 'paddle.fluid.regularizer.L2DecayRegularizer', 'fluid.regularizer.L2DecayRegularizer', ([], {'regularization_coeff': 'args.weight_decay'}), '(regularization_coeff=args.weight_decay)\n', (13212, 13252), True, 'import paddle.fluid as fluid\n'), ((18433, 18458), 'os.path.isdir', 'os.path.isdir', (['model_path'], {}), '(model_path)\n', (18446, 18458), False, 'import os\n'), ((18484, 18507), 'os.makedirs', 'os.makedirs', (['model_path'], {}), '(model_path)\n', (18495, 18507), False, 'import os\n'), ((13457, 13533), 'paddle.fluid.regularizer.L2DecayRegularizer', 'fluid.regularizer.L2DecayRegularizer', ([], {'regularization_coeff': 'args.weight_decay'}), '(regularization_coeff=args.weight_decay)\n', (13493, 13533), True, 'import paddle.fluid as fluid\n'), ((16007, 16030), 'numpy.array', 'np.array', (['fetch_outs[0]'], {}), '(fetch_outs[0])\n', (16015, 16030), True, 'import numpy as np\n'), ((13752, 13828), 'paddle.fluid.regularizer.L2DecayRegularizer', 'fluid.regularizer.L2DecayRegularizer', ([], {'regularization_coeff': 'args.weight_decay'}), '(regularization_coeff=args.weight_decay)\n', (13788, 13828), True, 'import paddle.fluid as fluid\n'), ((14458, 14478), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (14476, 14478), True, 'import paddle.fluid as fluid\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import inspect
import functools
import operator
import types
import warnings
from collections import defaultdict, OrderedDict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.utils import indent, metadata
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (sharedmethod, find_current_module,
check_broadcast, IncompatibleShapeError, isiterable)
from astropy.utils.codegen import make_function_with_signature
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import get_parameters
from astropy.nddata.utils import add_array, extract_array
from .utils import (combine_labels, make_binary_operator_eval,
get_inputs_and_params, _BoundingBox, _combine_equivalency_dict,
_ConstraintsDict)
from .parameters import (Parameter, InputParameterError,
param_repr_oneline, _tofloat)
# Public API of this module; star-imports and the docs index pick these up.
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
           'CompoundModel', 'fix_inputs', 'custom_model', 'ModelDefinitionError']
def _model_oper(oper, **kwargs):
    """
    Returns a function that evaluates a given Python arithmetic operator
    between two models. The operator should be given as a string, like ``'+'``
    or ``'**'``.
    """
    # A named closure (rather than a lambda) capturing ``oper`` and any
    # extra CompoundModel keyword arguments.
    def _oper_func(left, right):
        return CompoundModel(oper, left, right, **kwargs)
    return _oper_func
# Raised by the model machinery below (e.g. for an invalid hard-coded
# ``bounding_box`` or a badly declared bounding-box method); subclasses
# TypeError so existing ``except TypeError`` handlers still catch it.
class ModelDefinitionError(TypeError):
    """Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
    """
    Metaclass for Model.
    Currently just handles auto-generating the param_names list based on
    Parameter descriptors declared at the class-level of Model subclasses.
    """
    @classmethod
    def __prepare__(mcls, name, bases):
        # Use an ordered namespace so parameter definition order is preserved
        # in ``_parameters_`` / ``param_names``.
        return OrderedDict()
    _is_dynamic = False
    """
    This flag signifies whether this class was created in the "normal" way,
    with a class statement in the body of a module, as opposed to a call to
    `type` or some other metaclass constructor, such that the resulting class
    does not belong to a specific module. This is important for pickling of
    dynamic classes.
    This flag is always forced to False for new classes, so code that creates
    dynamic classes should manually set it to True on those classes when
    creating them.
    """
    # Default empty dict for _parameters_, which will be empty on model
    # classes that don't have any Parameters
    def __new__(mcls, name, bases, members):
        """
        Create the class: collect Parameter descriptors into ``_parameters_``
        (via ``get_parameters``), install the arithmetic operators used to
        build compound models, and compute the ordered ``param_names`` tuple
        including inherited parameters.
        """
        # See the docstring for _is_dynamic above
        if '_is_dynamic' not in members:
            members['_is_dynamic'] = mcls._is_dynamic
        get_parameters(members)
        opermethods = [
            ('__add__', _model_oper('+')),
            ('__sub__', _model_oper('-')),
            ('__mul__', _model_oper('*')),
            ('__truediv__', _model_oper('/')),
            ('__pow__', _model_oper('**')),
            ('__or__', _model_oper('|')),
            ('__and__', _model_oper('&')),
            ('_fix_inputs', _model_oper('fix_inputs'))
        ]
        for opermethod, opercall in opermethods:
            members[opermethod] = opercall
        cls = super().__new__(mcls, name, bases, members)
        param_names = list(members['_parameters_'])
        # Need to walk each base MRO to collect all parameter names
        for base in bases:
            for tbase in base.__mro__:
                if issubclass(tbase, Model):
                    # Preserve order of definitions
                    param_names = list(tbase._parameters_) + param_names
        # Remove duplicates (arising from redefinition in subclass).
        param_names = list(dict.fromkeys(param_names))
        if cls._parameters_:
            if hasattr(cls, '_param_names'):
                # Slight kludge to support compound models, where
                # cls.param_names is a property; could be improved with a
                # little refactoring but fine for now
                cls._param_names = tuple(param_names)
            else:
                cls.param_names = tuple(param_names)
        return cls
    def __init__(cls, name, bases, members):
        """
        Finish class setup for non-compound models: wrap any declared
        ``inverse``/``bounding_box`` in the generic property machinery and
        synthesize ``__call__``/``__init__`` with parameter-aware signatures.
        """
        super(_ModelMeta, cls).__init__(name, bases, members)
        if cls.__name__ != "CompoundModel":
            cls._create_inverse_property(members)
            cls._create_bounding_box_property(members)
            pdict = OrderedDict()
            # NOTE(review): the inner loop reads ``cls._parameters_`` (not
            # ``tbase._parameters_``) whenever a Model base is found in the
            # MRO, so it effectively just populates pdict once a Model base
            # exists — confirm this is intentional.
            for base in bases:
                for tbase in base.__mro__:
                    if issubclass(tbase, Model):
                        for parname, val in cls._parameters_.items():
                            pdict[parname] = val
            cls._handle_special_methods(members, pdict)
    def __repr__(cls):
        """
        Custom repr for Model subclasses.
        """
        return cls._format_cls_repr()
    def _repr_pretty_(cls, p, cycle):
        """
        Repr for IPython's pretty printer.
        By default IPython "pretty prints" classes, so we need to implement
        this so that IPython displays the custom repr for Models.
        """
        p.text(repr(cls))
    def __reduce__(cls):
        # Pickling support: statically defined classes pickle by name;
        # dynamically created ones (``_is_dynamic``) pickle their full
        # class dict so they can be reconstructed via ``type(...)``.
        if not cls._is_dynamic:
            # Just return a string specifying where the class can be imported
            # from
            return cls.__name__
        members = dict(cls.__dict__)
        # Delete any ABC-related attributes--these will be restored when
        # the class is reconstructed:
        for key in list(members):
            if key.startswith('_abc_'):
                del members[key]
        # Delete custom __init__ and __call__ if they exist:
        for key in ('__init__', '__call__'):
            if key in members:
                del members[key]
        return (type(cls), (cls.__name__, cls.__bases__, members))
    @property
    def name(cls):
        """
        The name of this model class--equivalent to ``cls.__name__``.
        This attribute is provided for symmetry with the `Model.name` attribute
        of model instances.
        """
        return cls.__name__
    @property
    def _is_concrete(cls):
        """
        A class-level property that determines whether the class is a concrete
        implementation of a Model--i.e. it is not some abstract base class or
        internal implementation detail (i.e. begins with '_').
        """
        return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
    def rename(cls, name=None, inputs=None, outputs=None):
        """
        Creates a copy of this model class with a new name, inputs or outputs.
        The new class is technically a subclass of the original class, so that
        instance and type checks will still work. For example::
            >>> from astropy.modeling.models import Rotation2D
            >>> SkyRotation = Rotation2D.rename('SkyRotation')
            >>> SkyRotation
            <class 'astropy.modeling.core.SkyRotation'>
            Name: SkyRotation (Rotation2D)
            N_inputs: 2
            N_outputs: 2
            Fittable parameters: ('angle',)
            >>> issubclass(SkyRotation, Rotation2D)
            True
            >>> r = SkyRotation(90)
            >>> isinstance(r, Rotation2D)
            True
        """
        # Attribute the renamed class to the caller's module so that repr
        # and pickling refer to the right place.
        mod = find_current_module(2)
        if mod:
            modname = mod.__name__
        else:
            modname = '__main__'
        if name is None:
            name = cls.name
        if inputs is None:
            inputs = cls.inputs
        else:
            if not isinstance(inputs, tuple):
                raise TypeError("Expected 'inputs' to be a tuple of strings.")
            elif len(inputs) != len(cls.inputs):
                raise ValueError(f'{cls.name} expects {len(cls.inputs)} inputs')
        if outputs is None:
            outputs = cls.outputs
        else:
            if not isinstance(outputs, tuple):
                raise TypeError("Expected 'outputs' to be a tuple of strings.")
            elif len(outputs) != len(cls.outputs):
                raise ValueError(f'{cls.name} expects {len(cls.outputs)} outputs')
        new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
        new_cls.__module__ = modname
        new_cls.__qualname__ = name
        return new_cls
    def _create_inverse_property(cls, members):
        """
        Move a class-declared ``inverse`` (plain method or property) into
        ``cls._inverse`` so ``Model.inverse``'s getter/setter machinery can
        manage it.
        """
        inverse = members.get('inverse')
        if inverse is None or cls.__bases__[0] is object:
            # The latter clause is the prevent the below code from running on
            # the Model base class, which implements the default getter and
            # setter for .inverse
            return
        if isinstance(inverse, property):
            # We allow the @property decorator to be omitted entirely from
            # the class definition, though its use should be encouraged for
            # clarity
            inverse = inverse.fget
        # Store the inverse getter internally, then delete the given .inverse
        # attribute so that cls.inverse resolves to Model.inverse instead
        cls._inverse = inverse
        del cls.inverse
    def _create_bounding_box_property(cls, members):
        """
        Takes any bounding_box defined on a concrete Model subclass (either
        as a fixed tuple or a property or method) and wraps it in the generic
        getter/setter interface for the bounding_box attribute.
        """
        # TODO: Much of this is verbatim from _create_inverse_property--I feel
        # like there could be a way to generify properties that work this way,
        # but for the time being that would probably only confuse things more.
        bounding_box = members.get('bounding_box')
        if bounding_box is None or cls.__bases__[0] is object:
            return
        if isinstance(bounding_box, property):
            bounding_box = bounding_box.fget
        if not callable(bounding_box):
            # See if it's a hard-coded bounding_box (as a sequence) and
            # normalize it
            try:
                bounding_box = _BoundingBox.validate(cls, bounding_box)
            except ValueError as exc:
                raise ModelDefinitionError(exc.args[0])
        else:
            sig = signature(bounding_box)
            # May be a method that only takes 'self' as an argument (like a
            # property, but the @property decorator was forgotten)
            #
            # However, if the method takes additional arguments then this is a
            # parameterized bounding box and should be callable
            if len(sig.parameters) > 1:
                bounding_box = \
                        cls._create_bounding_box_subclass(bounding_box, sig)
        # See the Model.bounding_box getter definition for how this attribute
        # is used
        cls._bounding_box = bounding_box
        del cls.bounding_box
    def _create_bounding_box_subclass(cls, func, sig):
        """
        For Models that take optional arguments for defining their bounding
        box, we create a subclass of _BoundingBox with a ``__call__`` method
        that supports those additional arguments.
        Takes the function's Signature as an argument since that is already
        computed in _create_bounding_box_property, so no need to duplicate that
        effort.
        """
        # TODO: Might be convenient if calling the bounding box also
        # automatically sets the _user_bounding_box. So that
        #
        #     >>> model.bounding_box(arg=1)
        #
        # in addition to returning the computed bbox, also sets it, so that
        # it's a shortcut for
        #
        #     >>> model.bounding_box = model.bounding_box(arg=1)
        #
        # Not sure if that would be non-obvious / confusing though...
        def __call__(self, **kwargs):
            return func(self._model, **kwargs)
        kwargs = []
        for idx, param in enumerate(sig.parameters.values()):
            if idx == 0:
                # Presumed to be a 'self' argument
                continue
            if param.default is param.empty:
                raise ModelDefinitionError(
                    'The bounding_box method for {0} is not correctly '
                    'defined: If defined as a method all arguments to that '
                    'method (besides self) must be keyword arguments with '
                    'default values that can be used to compute a default '
                    'bounding box.'.format(cls.name))
            kwargs.append((param.name, param.default))
        __call__.__signature__ = sig
        return type('_{0}BoundingBox'.format(cls.name), (_BoundingBox,),
                    {'__call__': __call__})
    def _handle_special_methods(cls, members, pdict):
        """
        Synthesize ``__call__`` and ``__init__`` methods whose signatures
        expose the model's named inputs and parameters, unless the class
        already defines them explicitly.
        """
        # Handle init creation from inputs
        def update_wrapper(wrapper, cls):
            # Set up the new __call__'s metadata attributes as though it were
            # manually defined in the class definition
            # A bit like functools.update_wrapper but uses the class instead of
            # the wrapped function
            wrapper.__module__ = cls.__module__
            wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
            if hasattr(cls, '__qualname__'):
                wrapper.__qualname__ = '{0}.{1}'.format(
                    cls.__qualname__, wrapper.__name__)
        if ('__call__' not in members and 'n_inputs' in members and
                isinstance(members['n_inputs'], int) and members['n_inputs'] > 0):
            # Don't create a custom __call__ for classes that already have one
            # explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__
            def __call__(self, *inputs, **kwargs):
                """Evaluate this model on the supplied inputs."""
                return super(cls, self).__call__(*inputs, **kwargs)
            # When called, models can take two optional keyword arguments:
            #
            # * model_set_axis, which indicates (for multi-dimensional input)
            #   which axis is used to indicate different models
            #
            # * equivalencies, a dictionary of equivalencies to be applied to
            #   the input values, where each key should correspond to one of
            #   the inputs.
            #
            # The following code creates the __call__ function with these
            # two keyword arguments.
            args = ('self',)
            kwargs = dict([('model_set_axis', None),
                           ('with_bounding_box', False),
                           ('fill_value', np.nan),
                           ('equivalencies', None),
                           ('inputs_map', None)])
            new_call = make_function_with_signature(
                __call__, args, kwargs, varargs='inputs', varkwargs='new_inputs')
            # The following makes it look like __call__
            # was defined in the class
            update_wrapper(new_call, cls)
            cls.__call__ = new_call
        if ('__init__' not in members and not inspect.isabstract(cls) and
                cls._parameters_):
            # Build list of all parameters including inherited ones
            # If *all* the parameters have default values we can make them
            # keyword arguments; otherwise they must all be positional
            # arguments
            if all(p.default is not None for p in pdict.values()):
                args = ('self',)
                kwargs = []
                for param_name, param_val in pdict.items():
                    default = param_val.default
                    unit = param_val.unit
                    # If the unit was specified in the parameter but the
                    # default is not a Quantity, attach the unit to the
                    # default.
                    if unit is not None:
                        default = Quantity(default, unit, copy=False)
                    kwargs.append((param_name, default))
            else:
                args = ('self',) + tuple(pdict.keys())
                kwargs = {}
            def __init__(self, *params, **kwargs):
                return super(cls, self).__init__(*params, **kwargs)
            new_init = make_function_with_signature(
                __init__, args, kwargs, varkwargs='kwargs')
            update_wrapper(new_init, cls)
            cls.__init__ = new_init
    # *** Arithmetic operators for creating compound models ***
    __add__ = _model_oper('+')
    __sub__ = _model_oper('-')
    __mul__ = _model_oper('*')
    __truediv__ = _model_oper('/')
    __pow__ = _model_oper('**')
    __or__ = _model_oper('|')
    __and__ = _model_oper('&')
    _fix_inputs = _model_oper('fix_inputs')
    # *** Other utilities ***
    def _format_cls_repr(cls, keywords=[]):
        """
        Internal implementation of ``__repr__``.
        This is separated out for ease of use by subclasses that wish to
        override the default ``__repr__`` while keeping the same basic
        formatting.
        """
        # NOTE: ``keywords=[]`` is a mutable default, but it is only read
        # (``default_keywords + keywords``), never mutated, so it is safe.
        # For the sake of familiarity start the output with the standard class
        # __repr__
        parts = [super().__repr__()]
        if not cls._is_concrete:
            return parts[0]
        def format_inheritance(cls):
            bases = []
            for base in cls.mro()[1:]:
                if not issubclass(base, Model):
                    continue
                elif (inspect.isabstract(base) or
                      base.__name__.startswith('_')):
                    break
                bases.append(base.name)
            if bases:
                return '{0} ({1})'.format(cls.name, ' -> '.join(bases))
            return cls.name
        try:
            default_keywords = [
                ('Name', format_inheritance(cls)),
                ('N_inputs', cls.n_inputs),
                ('N_outputs', cls.n_outputs),
            ]
            if cls.param_names:
                default_keywords.append(('Fittable parameters',
                                         cls.param_names))
            for keyword, value in default_keywords + keywords:
                if value is not None:
                    parts.append('{0}: {1}'.format(keyword, value))
            return '\n'.join(parts)
        except Exception:
            # If any of the above formatting fails fall back on the basic repr
            # (this is particularly useful in debugging)
            return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
    `~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ieqcons[j](x0, *args) >=
0.0`` is a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
    def __init__(self, *args, meta=None, name=None, **kwargs):
        """
        Set up a model instance: copy class-level Parameter descriptors onto
        the instance, then initialize constraints, parameter values, slices
        and unit support.  Positional ``args`` and remaining ``kwargs`` carry
        the parameter values; ``meta``/``name`` are stored on the instance.
        """
        super().__init__()
        self._default_inputs_outputs()
        if meta is not None:
            self.meta = meta
        self._name = name
        # add parameters to instance level by walking MRO list
        mro = self.__class__.__mro__
        for cls in mro:
            if issubclass(cls, Model):
                for parname, val in cls._parameters_.items():
                    newpar = copy.deepcopy(val)
                    newpar.model = self
                    # MRO is walked most-derived first; the first definition
                    # of a parameter name wins.
                    if parname not in self.__dict__:
                        self.__dict__[parname] = newpar
        self._initialize_constraints(kwargs)
        # Remaining keyword args are either parameter values or invalid
        # Parameter values must be passed in as keyword arguments in order to
        # distinguish them
        self._initialize_parameters(args, kwargs)
        self._initialize_slices()
        self._initialize_unit_support()
        # Raise DeprecationWarning on classes with class attributes
        # ``inputs`` and ``outputs``.
        self._inputs_deprecation()
    def _inputs_deprecation(self):
        """
        Emit an AstropyDeprecationWarning when the class still declares
        ``inputs`` as a tuple class attribute instead of ``n_inputs``.
        """
        if hasattr(self.__class__, 'inputs') and isinstance(self.__class__.inputs, tuple):
            warnings.warn(
                f"""Class {self.__class__.__name__} defines class attributes ``inputs``.
This has been deprecated in v4.0 and support will be removed in v4.1.
Starting with v4.0 classes must define a class attribute ``n_inputs``.
Please consult the documentation for details.
""", AstropyDeprecationWarning)
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
    @property
    def inputs(self):
        """Tuple of names of this model's inputs (length ``n_inputs``)."""
        return self._inputs
    @inputs.setter
    def inputs(self, val):
        if len(val) != self.n_inputs:
            raise ValueError(f"Expected {self.n_inputs} number of inputs, got {len(val)}.")
        self._inputs = val
        # Renaming inputs changes the keys of the per-input unit dicts, so
        # rebuild them.
        self._initialize_unit_support()
    @property
    def outputs(self):
        """Tuple of names of this model's outputs (length ``n_outputs``)."""
        return self._outputs
    @outputs.setter
    def outputs(self, val):
        if len(val) != self.n_outputs:
            raise ValueError(f"Expected {self.n_outputs} number of outputs, got {len(val)}.")
        self._outputs = val
    @property
    def n_inputs(self):
        """Number of inputs this model accepts."""
        # TODO: remove the code in the ``if`` block when support
        # for models with ``inputs`` as class variables is removed.
        if hasattr(self.__class__, 'n_inputs') and isinstance(self.__class__.n_inputs, property):
            # Legacy classes define ``inputs`` rather than ``n_inputs``;
            # derive the count from the inputs tuple instead.
            try:
                return len(self.__class__.inputs)
            except TypeError:
                try:
                    return len(self.inputs)
                except AttributeError:
                    return 0
        return self.__class__.n_inputs
    @property
    def n_outputs(self):
        """Number of outputs this model produces."""
        # TODO: remove the code in the ``if`` block when support
        # for models with ``outputs`` as class variables is removed.
        if hasattr(self.__class__, 'n_outputs') and isinstance(self.__class__.n_outputs, property):
            try:
                return len(self.__class__.outputs)
            except TypeError:
                try:
                    return len(self.outputs)
                except AttributeError:
                    return 0
        return self.__class__.n_outputs
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {key: self._input_units_strict for
key in self.inputs}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless
for key in self.inputs}
    @property
    def input_units_strict(self):
        """
        Enforce strict units on inputs to evaluate. If this is set to True,
        input values to evaluate will be in the exact units specified by
        input_units. If the input quantities are convertible to input_units,
        they are converted. If this is a dictionary then it should map input
        name to a bool to set strict input units for that parameter.
        """
        val = self._input_units_strict
        if isinstance(val, bool):
            return {key: val for key in self.inputs}
        # Re-key the stored dict's values onto the (possibly renamed) input
        # names, relying on the dict preserving the order of self.inputs.
        return dict(zip(self.inputs, val.values()))
    @property
    def input_units_allow_dimensionless(self):
        """
        Allow dimensionless input (and corresponding output). If this is True,
        input values to evaluate will gain the units specified in input_units. If
        this is a dictionary then it should map input name to a bool to allow
        dimensionless numbers for that input.
        Only has an effect if input_units is defined.
        """
        val = self._input_units_allow_dimensionless
        if isinstance(val, bool):
            return {key: val for key in self.inputs}
        return dict(zip(self.inputs, val.values()))
    @property
    def uses_quantity(self):
        """
        True if this model has been created with `~astropy.units.Quantity`
        objects or if there are no parameters.
        This can be used to determine if this model should be evaluated with
        `~astropy.units.Quantity` or regular floats.
        """
        pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
        # Vacuously True for parameterless models (empty list).
        return (len(pisq) == 0) or any(pisq)
    def __repr__(self):
        """Delegate to the overridable ``_format_repr`` helper."""
        return self._format_repr()
    def __str__(self):
        """Delegate to the overridable ``_format_str`` helper."""
        return self._format_str()
    def __len__(self):
        """Number of models in this model (1 unless this is a model set)."""
        return self._n_models
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]['shape']
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]['size']
if (np.size(value) != esize or
_strip_ones(vshape) != _strip_ones(eshape)):
raise InputParameterError(
"Value for parameter {0} does not match shape or size\n"
"expected by model ({1}, {2}) vs ({3}, {4})".format(
attr, vshape, np.size(value), eshape, esize))
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity")
param._unit = value.unit
param.value = value.value
else:
if attr in ['fittable', 'linear']:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
    def __call__(self, *args, **kwargs):
        """
        Evaluate this model using the given input(s) and the parameter values
        that were specified when the model was instantiated.
        """
        # Normalize renamed keyword inputs to positional order, then hand
        # off evaluation to the module-level ``generic_call`` helper.
        new_args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
        return generic_call(self, *new_args, **kwargs)
    def _get_renamed_inputs_as_positional(self, *args, **kwargs):
        """
        Convert input values passed by (possibly renamed) keyword into
        positional arguments ordered as ``self.inputs``.  Returns
        ``(new_args, kwargs)`` where ``kwargs`` retains only the evaluation
        options (``model_set_axis`` etc.).  Raises ValueError when the total
        number of supplied inputs does not equal ``self.n_inputs``.
        """
        def _keyword2positional(kwargs):
            # Inputs were passed as keyword (not positional) arguments.
            # Because the signature of the ``__call__`` is defined at
            # the class level, the name of the inputs cannot be changed at
            # the instance level and the old names are always present in the
            # signature of the method. In order to use the new names of the
            # inputs, the old names are taken out of ``kwargs``, the input
            # values are sorted in the order of self.inputs and passed as
            # positional arguments to ``__call__``.
            # These are the keys that are always present as keyword arguments.
            keys = ['model_set_axis', 'with_bounding_box', 'fill_value',
                    'equivalencies', 'inputs_map']
            new_inputs = {}
            # kwargs contain the names of the new inputs + ``keys``
            allkeys = list(kwargs.keys())
            # Remove the names of the new inputs from kwargs and save them
            # to a dict ``new_inputs``.
            for key in allkeys:
                if key not in keys:
                    new_inputs[key] = kwargs[key]
                    del kwargs[key]
            return new_inputs, kwargs
        n_args = len(args)
        new_inputs, kwargs = _keyword2positional(kwargs)
        n_all_args = n_args + len(new_inputs)
        if n_all_args < self.n_inputs:
            raise ValueError(f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}")
        elif n_all_args > self.n_inputs:
            raise ValueError(f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}")
        if n_args == 0:
            # Create positional arguments from the keyword arguments in ``new_inputs``.
            new_args = []
            for k in self.inputs:
                new_args.append(new_inputs[k])
        elif n_args != self.n_inputs:
            # Some inputs are passed as positional, others as keyword arguments.
            args = list(args)
            # Create positional arguments from the keyword arguments in ``new_inputs``.
            new_args = []
            for k in self.inputs:
                if k in new_inputs:
                    new_args.append(new_inputs[k])
                else:
                    # Consume the remaining positional values in order.
                    new_args.append(args[0])
                    del args[0]
        else:
            new_args = args
        return new_args, kwargs
    # *** Properties ***
    @property
    def name(self):
        """User-provided name for this model instance."""
        return self._name
    @name.setter
    def name(self, val):
        """Assign a (new) name to this model."""
        self._name = val
    @property
    def model_set_axis(self):
        """
        The index of the model set axis--that is the axis of a parameter array
        that pertains to which model a parameter value pertains to--as
        specified when the model was initialized.
        See the documentation on :ref:`modeling-model-sets`
        for more details.
        """
        return self._model_set_axis
    @property
    def param_sets(self):
        """
        Return parameters as a pset.
        This is a list with one item per parameter set, which is an array of
        that parameter's values across all parameter sets, with the last axis
        associated with the parameter set.
        """
        return self._param_sets()
    @property
    def parameters(self):
        """
        A flattened array of all parameter values in all parameter sets.
        Fittable parameters maintain this list and fitters modify it.
        """
        # Currently the sequence of a model's parameters must be contiguous
        # within the _parameters array (which may be a view of a larger array,
        # for example when taking a sub-expression of a compound model), so
        # the assumption here is reliable:
        if not self.param_names:
            # Trivial, but not unheard of
            return self._parameters
        self._parameters_to_array()
        start = self._param_metrics[self.param_names[0]]['slice'].start
        stop = self._param_metrics[self.param_names[-1]]['slice'].stop
        return self._parameters[start:stop]
    @parameters.setter
    def parameters(self, value):
        """
        Assigning to this attribute updates the parameters array rather than
        replacing it.
        """
        if not self.param_names:
            return
        start = self._param_metrics[self.param_names[0]]['slice'].start
        stop = self._param_metrics[self.param_names[-1]]['slice'].stop
        try:
            value = np.array(value).flatten()
            self._parameters[start:stop] = value
        except ValueError as e:
            raise InputParameterError(
                "Input parameter values not compatible with the model "
                "parameters array: {0}".format(e))
        # Propagate the flat array back into the Parameter objects.
        self._array_to_parameters()
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
"""
return _ConstraintsDict(self, 'fixed')
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
return _ConstraintsDict(self, 'bounds')
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
return _ConstraintsDict(self, 'tied')
@property
def eqcons(self):
    """List of parameter equality constraints."""
    key = 'eqcons'
    return self._mconstraints[key]
@property
def ineqcons(self):
    """List of parameter inequality constraints."""
    key = 'ineqcons'
    return self._mconstraints[key]
@property
def inverse(self):
    """
    Returns a new `~astropy.modeling.Model` instance which performs the
    inverse transform, if an analytic inverse is defined for this model.

    Even on models that don't have an inverse defined, this property can
    be set with a manually-defined inverse, such as a pre-computed or
    experimentally determined inverse (often given as a
    `~astropy.modeling.polynomial.PolynomialModel`, but not by
    requirement).  A manually assigned inverse always takes precedence
    over the class-defined default.

    A custom inverse can be deleted with ``del model.inverse``.  In this
    case the model's inverse is reset to its default, if a default exists
    (otherwise the default is to raise `NotImplementedError`).

    Note to authors of `~astropy.modeling.Model` subclasses: to define an
    inverse for a model simply override this property to return the
    appropriate model representing the inverse.  The machinery that makes
    the inverse manually-overridable is added automatically by the base
    class.
    """
    user_inverse = self._user_inverse
    if user_inverse is not None:
        return user_inverse
    default_inverse = self._inverse
    if default_inverse is not None:
        return default_inverse()
    raise NotImplementedError("An analytical inverse transform has not "
                              "been implemented for this model.")

@inverse.setter
def inverse(self, value):
    # Only a `Model` instance or an explicit `None` (meaning: force this
    # model to have no inverse) may be assigned.
    if not isinstance(value, (Model, type(None))):
        raise ValueError(
            "The ``inverse`` attribute may be assigned a `Model` "
            "instance or `None` (where `None` explicitly forces the "
            "model to have no inverse.")
    self._user_inverse = value

@inverse.deleter
def inverse(self):
    """
    Resets the model's inverse to its default (if one exists, otherwise
    the model will have no inverse).
    """
    del self._user_inverse
@property
def has_user_inverse(self):
    """
    A flag indicating whether or not a custom inverse model has been
    assigned to this model by a user, via assignment to ``model.inverse``.
    """
    user_inverse = self._user_inverse
    return user_inverse is not None
@property
def bounding_box(self):
    r"""
    A `tuple` of length `n_inputs` defining the bounding box limits, or
    `None` for no bounding box.

    The default limits are given by a ``bounding_box`` property or method
    defined in the class body of a specific model. If not defined then
    this property just raises `NotImplementedError` by default (but may be
    assigned a custom value by a user). ``bounding_box`` can be set
    manually to an array-like object of shape ``(model.n_inputs, 2)``. For
    further usage, see :ref:`bounding-boxes`

    The limits are ordered according to the `numpy` indexing
    convention, and are the reverse of the model input order,
    e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:

    * for 1D: ``(x_low, x_high)``
    * for 2D: ``((y_low, y_high), (x_low, x_high))``
    * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``

    Examples
    --------
    Setting the ``bounding_box`` limits for a 1D and 2D model:

    >>> from astropy.modeling.models import Gaussian1D, Gaussian2D
    >>> model_1d = Gaussian1D()
    >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
    >>> model_1d.bounding_box = (-5, 5)
    >>> model_2d.bounding_box = ((-6, 6), (-5, 5))

    Setting the bounding_box limits for a user-defined 3D `custom_model`:

    >>> from astropy.modeling.models import custom_model
    >>> def const3d(x, y, z, amp=1):
    ...    return amp
    ...
    >>> Const3D = custom_model(const3d)
    >>> model_3d = Const3D()
    >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))

    To reset ``bounding_box`` to its default limits just delete the
    user-defined value--this will reset it back to the default defined
    on the class:

    >>> del model_1d.bounding_box

    To disable the bounding box entirely (including the default),
    set ``bounding_box`` to `None`:

    >>> model_1d.bounding_box = None
    >>> model_1d.bounding_box  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    NotImplementedError: No bounding box is defined for this model
    (note: the bounding box was explicitly disabled for this model;
    use `del model.bounding_box` to restore the default bounding box,
    if one is defined for this model).
    """
    if self._user_bounding_box is not None:
        # ``NotImplemented`` is the sentinel the setter stores when a user
        # explicitly disables the bounding box with ``bounding_box = None``.
        if self._user_bounding_box is NotImplemented:
            raise NotImplementedError(
                "No bounding box is defined for this model (note: the "
                "bounding box was explicitly disabled for this model; "
                "use `del model.bounding_box` to restore the default "
                "bounding box, if one is defined for this model).")
        return self._user_bounding_box
    elif self._bounding_box is None:
        raise NotImplementedError(
            "No bounding box is defined for this model.")
    elif isinstance(self._bounding_box, _BoundingBox):
        # This typically implies a hard-coded bounding box. This will
        # probably be rare, but it is an option
        return self._bounding_box
    elif isinstance(self._bounding_box, types.MethodType):
        return self._bounding_box()
    else:
        # The only other allowed possibility is that it's a _BoundingBox
        # subclass, so we call it with its default arguments and return an
        # instance of it (that can be called to recompute the bounding box
        # with any optional parameters)
        # (In other words, in this case self._bounding_box is a *class*)
        bounding_box = self._bounding_box((), _model=self)()
        return self._bounding_box(bounding_box, _model=self)

@bounding_box.setter
def bounding_box(self, bounding_box):
    """
    Assigns the bounding box limits.
    """
    if bounding_box is None:
        cls = None
        # We use this to explicitly set an unimplemented bounding box (as
        # opposed to no user bounding box defined)
        bounding_box = NotImplemented
    elif (isinstance(self._bounding_box, type) and
            issubclass(self._bounding_box, _BoundingBox)):
        # The model defines its own _BoundingBox subclass; validate with it.
        cls = self._bounding_box
    else:
        cls = _BoundingBox
    if cls is not None:
        try:
            bounding_box = cls.validate(self, bounding_box)
        except ValueError as exc:
            raise ValueError(exc.args[0])
    self._user_bounding_box = bounding_box

@bounding_box.deleter
def bounding_box(self):
    """Discard any user-assigned bounding box, restoring the class default."""
    self._user_bounding_box = None
@property
def has_user_bounding_box(self):
    """
    A flag indicating whether or not a custom bounding_box has been
    assigned to this model by a user, via assignment to
    ``model.bounding_box``.
    """
    user_bbox = self._user_bounding_box
    return user_bbox is not None
@property
def separable(self):
    """ A flag indicating whether a model is separable."""
    flag = self._separable
    if flag is None:
        raise NotImplementedError(
            'The "separable" property is not defined for '
            'model {}'.format(self.__class__.__name__))
    return flag
# *** Public methods ***
def without_units_for_data(self, **kwargs):
    """
    Return an instance of the model for which the parameter values have
    been converted to the right units for the data, then the units have
    been stripped away.

    The input and output Quantity objects should be given as keyword
    arguments.

    Notes
    -----
    This method is needed in order to be able to fit models with units in
    the parameters, since we need to temporarily strip away the units from
    the model during the fitting (which might be done by e.g. scipy
    functions).

    The units that the parameters should be converted to are not
    necessarily the units of the input data, but are derived from them.
    Model subclasses that want fitting to work in the presence of
    quantities need to define a ``_parameter_units_for_data_units`` method
    that takes the input and output units (as two dictionaries) and
    returns a dictionary giving the target units for each parameter.

    For compound models this will only work when the expression only
    involves the addition or subtraction operators.
    """
    if isinstance(self, CompoundModel):
        self._make_opset()
        if not self._opset.issubset(set(('+', '-'))):
            # BUGFIX: the original message concatenated to
            # "...performed oncompound models..." (missing space between
            # adjacent string literals).
            raise ValueError(
                "Fitting a compound model without units can only be performed "
                "on compound models that only use the arithmetic operators "
                "+ and -")
    model = self.copy()
    # Units of the supplied data; values without a ``unit`` attribute are
    # treated as dimensionless.
    inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
                   for inp in self.inputs if kwargs[inp] is not None}
    outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
                    for out in self.outputs if kwargs[out] is not None}
    parameter_units = self._parameter_units_for_data_units(inputs_unit,
                                                           outputs_unit)
    # Convert each parameter's value to its target unit, then drop the unit.
    for name, unit in parameter_units.items():
        parameter = getattr(model, name)
        if parameter.unit is not None:
            parameter.value = parameter.quantity.to(unit).value
            parameter._set_unit(None, force=True)
    if isinstance(model, CompoundModel):
        model.strip_units_from_tree()
    return model
def strip_units_from_tree(self):
    """Remove the unit from every parameter of every leaf model in the tree."""
    for leaf in self._leaflist:
        for pname in leaf.param_names:
            getattr(leaf, pname)._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
    """
    Return an instance of the model which has units for which the parameter
    values are compatible with the data units specified.

    The input and output Quantity objects should be given as keyword
    arguments.

    Notes
    -----
    This method is needed in order to be able to fit models with units in
    the parameters, since we need to temporarily strip away the units from
    the model during the fitting (which might be done by e.g. scipy
    functions).

    The units that the parameters will gain are not necessarily the units
    of the input data, but are derived from them. Model subclasses that
    want fitting to work in the presence of quantities need to define a
    ``_parameter_units_for_data_units`` method that takes the input and output
    units (as two dictionaries) and returns a dictionary giving the target
    units for each parameter.
    """
    model = self.copy()
    # Units of the supplied data; values without a ``unit`` attribute are
    # treated as dimensionless.
    inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
                   for inp in self.inputs if kwargs[inp] is not None}
    outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
                    for out in self.outputs if kwargs[out] is not None}
    parameter_units = self._parameter_units_for_data_units(inputs_unit,
                                                           outputs_unit)
    # We are adding units to parameters that already have a value, but we
    # don't want to convert the parameter, just add the unit directly,
    # hence the call to ``_set_unit``.
    for name, unit in parameter_units.items():
        parameter = getattr(model, name)
        parameter._set_unit(unit, force=True)
    return model
@property
def _has_units(self):
    # True when at least one parameter currently carries a unit.
    return any(getattr(self, name).unit is not None
               for name in self.param_names)
@property
def _supports_unit_fitting(self):
    # Fitting with quantities is only possible when the model can report
    # what units its parameters should take for given data units -- i.e.
    # when it defines a ``_parameter_units_for_data_units`` method.
    return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
    """
    Evaluate the model on some input variables.

    Abstract method: concrete model subclasses must implement the actual
    computation here.
    """
def sum_of_implicit_terms(self, *args, **kwargs):
    """
    Evaluate the sum of any implicit model terms on some input variables.

    This includes any fixed terms used in evaluating a linear model that
    do not have corresponding parameters exposed to the user. The
    prototypical case is `astropy.modeling.functional_models.Shift`, which
    corresponds to a function y = a + bx, where b=1 is intrinsically fixed
    by the type of model, such that sum_of_implicit_terms(x) == x. This
    method is needed by linear fitters to correct the dependent variable
    for the implicit term(s) when solving for the remaining terms
    (ie. a = y - bx).

    This base implementation returns `None`; models that have implicit
    terms override it.
    """
def render(self, out=None, coords=None):
    """
    Evaluate a model at fixed positions, respecting the ``bounding_box``.

    The key difference relative to evaluating the model directly is that
    this method is limited to a bounding box if the `Model.bounding_box`
    attribute is set.

    Parameters
    ----------
    out : `numpy.ndarray`, optional
        An array that the evaluated model will be added to. If this is not
        given (or given as ``None``), a new array will be created.
    coords : array_like, optional
        An array to be used to translate from the model's input coordinates
        to the ``out`` array. It should have the property that
        ``self(coords)`` yields the same shape as ``out``. If ``out`` is
        not specified, ``coords`` will be used to determine the shape of
        the returned array. If this is not provided (or None), the model
        will be evaluated on a grid determined by `Model.bounding_box`.

    Returns
    -------
    out : `numpy.ndarray`
        The model added to ``out`` if ``out`` is not ``None``, or else a
        new array from evaluating the model over ``coords``.
        If ``out`` and ``coords`` are both `None`, the returned array is
        limited to the `Model.bounding_box` limits. If
        `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be
        passed.

    Raises
    ------
    ValueError
        If ``coords`` are not given and the the `Model.bounding_box` of
        this model is not set.

    Examples
    --------
    :ref:`bounding-boxes`
    """
    try:
        bbox = self.bounding_box
    except NotImplementedError:
        # No bounding box defined (and not explicitly disabled either way):
        # fall back to rendering over ``coords``/``out``.
        bbox = None
    ndim = self.n_inputs
    if (coords is None) and (out is None) and (bbox is None):
        raise ValueError('If no bounding_box is set, '
                         'coords or out must be input.')
    # for consistent indexing
    if ndim == 1:
        if coords is not None:
            coords = [coords]
        if bbox is not None:
            bbox = [bbox]
    if coords is not None:
        coords = np.asanyarray(coords, dtype=float)
        # Check dimensions match out and model
        assert len(coords) == ndim
        if out is not None:
            if coords[0].shape != out.shape:
                raise ValueError('inconsistent shape of the output.')
        else:
            out = np.zeros(coords[0].shape)
    if out is not None:
        out = np.asanyarray(out, dtype=float)
        if out.ndim != ndim:
            raise ValueError('the array and model must have the same '
                             'number of dimensions.')
    if bbox is not None:
        # Assures position is at center pixel,
        # important when using add_array.
        pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
                       for bb in bbox]).astype(int).T
        pos, delta = pd
        if coords is not None:
            sub_shape = tuple(delta * 2 + 1)
            sub_coords = np.array([extract_array(c, sub_shape, pos)
                                   for c in coords])
        else:
            limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
            sub_coords = np.mgrid[limits]
        # Reverse the coordinate order before evaluating (bounding box
        # limits follow numpy convention, the reverse of the input order).
        sub_coords = sub_coords[::-1]
        if out is None:
            out = self(*sub_coords)
        else:
            try:
                out = add_array(out, self(*sub_coords), pos)
            except ValueError:
                raise ValueError(
                    'The `bounding_box` is larger than the input out in '
                    'one or more dimensions. Set '
                    '`model.bounding_box = None`.')
    else:
        if coords is None:
            im_shape = out.shape
            limits = [slice(i) for i in im_shape]
            coords = np.mgrid[limits]
        # Same numpy-convention reversal as above.
        coords = coords[::-1]
        out += self(*coords)
    return out
@property
def input_units(self):
    """
    This property is used to indicate what units or sets of units the
    evaluate method expects, and returns a dictionary mapping inputs to
    units (or `None` if any units are accepted).

    Model sub-classes can also use function annotations in evaluate to
    indicate valid input units, in which case this property should
    not be overridden since it will return the input units based on the
    annotations.
    """
    if hasattr(self, '_input_units'):
        return self._input_units
    if hasattr(self.evaluate, '__annotations__'):
        annotations = dict(self.evaluate.__annotations__)
        annotations.pop('return', None)
        if annotations:
            # If there are not annotations for all inputs this will error.
            return {name: annotations[name] for name in self.inputs}
    # None means any unit is accepted
    return None
@property
def return_units(self):
    """
    This property is used to indicate what units or sets of units the
    output of evaluate should be in, and returns a dictionary mapping
    outputs to units (or `None` if any units are accepted).

    Model sub-classes can also use function annotations in evaluate to
    indicate valid output units, in which case this property should not be
    overridden since it will return the return units based on the
    annotations.
    """
    if hasattr(self, '_return_units'):
        return self._return_units
    if hasattr(self.evaluate, '__annotations__'):
        return self.evaluate.__annotations__.get('return', None)
    # None means any unit is accepted
    return None
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
                   **kwargs):
    """
    This method is used in `~astropy.modeling.Model.__call__` to ensure
    that all the inputs to the model can be broadcast into compatible
    shapes (if one or both of them are input as arrays), particularly if
    there are more than one parameter sets. This also makes sure that (if
    applicable) the units of the input will be compatible with the evaluate
    method.
    """
    # When we instantiate the model class, we make sure that __call__ can
    # take the following two keyword arguments: model_set_axis and
    # equivalencies.
    if model_set_axis is None:
        # By default the model_set_axis for the input is assumed to be the
        # same as that for the parameters the model was defined with
        # TODO: Ensure that negative model_set_axis arguments are respected
        model_set_axis = self.model_set_axis
    n_models = len(self)
    params = [getattr(self, name) for name in self.param_names]
    # Coerce every input to a float array before shape/unit validation.
    inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
    _validate_input_shapes(inputs, self.inputs, n_models,
                           model_set_axis, self.standard_broadcasting)
    inputs_map = kwargs.get('inputs_map', None)
    inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
    # The input formatting required for single models versus a multiple
    # model set are different enough that they've been split into separate
    # subroutines
    if n_models == 1:
        return _prepare_inputs_single_model(self, params, inputs,
                                            **kwargs)
    else:
        return _prepare_inputs_model_set(self, params, inputs, n_models,
                                         model_set_axis, **kwargs)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
    """
    Check (and where needed convert) the units of ``inputs`` against
    ``self.input_units``, returning the possibly-converted list of inputs.

    Raises `UnitsError` when a Quantity input cannot be converted to the
    required unit, or when a unitless input is given where units are
    required.
    """
    inputs = list(inputs)
    name = self.name or self.__class__.__name__
    # Check that the units are correct, if applicable
    if self.input_units is not None:
        # If a leaflist is provided that means this is in the context of
        # a compound model and it is necessary to create the appropriate
        # alias for the input coordinate name for the equivalencies dict
        if inputs_map:
            edict = {}
            for mod, mapping in inputs_map:
                if self is mod:
                    edict[mapping[0]] = equivalencies[mapping[1]]
        else:
            edict = equivalencies
        # We combine any instance-level input equivalencies with user
        # specified ones at call-time.
        input_units_equivalencies = _combine_equivalency_dict(self.inputs,
                                                              edict,
                                                              self.input_units_equivalencies)
        # We now iterate over the different inputs and make sure that their
        # units are consistent with those specified in input_units.
        for i in range(len(inputs)):
            input_name = self.inputs[i]
            input_unit = self.input_units.get(input_name, None)
            if input_unit is None:
                # No unit requirement for this input.
                continue
            if isinstance(inputs[i], Quantity):
                # We check for consistency of the units with input_units,
                # taking into account any equivalencies
                if inputs[i].unit.is_equivalent(
                        input_unit,
                        equivalencies=input_units_equivalencies[input_name]):
                    # If equivalencies have been specified, we need to
                    # convert the input to the input units - this is
                    # because some equivalencies are non-linear, and
                    # we need to be sure that we evaluate the model in
                    # its own frame of reference. If input_units_strict
                    # is set, we also need to convert to the input units.
                    if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]:
                        inputs[i] = inputs[i].to(input_unit,
                                                 equivalencies=input_units_equivalencies[input_name])
                else:
                    # We consider the following two cases separately so as
                    # to be able to raise more appropriate/nicer exceptions
                    if input_unit is dimensionless_unscaled:
                        raise UnitsError("{0}: Units of input '{1}', {2} ({3}),"
                                         "could not be converted to "
                                         "required dimensionless "
                                         "input".format(name,
                                                        self.inputs[i],
                                                        inputs[i].unit,
                                                        inputs[i].unit.physical_type))
                    else:
                        raise UnitsError("{0}: Units of input '{1}', {2} ({3}),"
                                         " could not be "
                                         "converted to required input"
                                         " units of {4} ({5})".format(
                                             name,
                                             self.inputs[i],
                                             inputs[i].unit,
                                             inputs[i].unit.physical_type,
                                             input_unit,
                                             input_unit.physical_type))
            else:
                # If we allow dimensionless input, we add the units to the
                # input values without conversion, otherwise we raise an
                # exception.
                if (not self.input_units_allow_dimensionless[input_name] and
                        input_unit is not dimensionless_unscaled and
                        input_unit is not None):
                    if np.any(inputs[i] != 0):
                        raise UnitsError("{0}: Units of input '{1}', (dimensionless), could not be "
                                         "converted to required input units of "
                                         "{2} ({3})".format(name, self.inputs[i], input_unit,
                                                            input_unit.physical_type))
    return inputs
def _process_output_units(self, inputs, outputs):
    """
    Attach ``return_units`` to the outputs when the inputs carried units;
    otherwise return the outputs unchanged.
    """
    inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
    if self.return_units and inputs_are_quantity:
        # We allow a non-iterable unit only if there is one output
        if self.n_outputs == 1 and not isiterable(self.return_units):
            return_units = {self.outputs[0]: self.return_units}
        else:
            return_units = self.return_units
        # Wrap each output in a Quantity with its declared unit (if any).
        outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True)
                         for out, out_name in zip(outputs, self.outputs)])
    return outputs
def prepare_outputs(self, format_info, *outputs, **kwargs):
    """
    Format the raw outputs of ``evaluate``, delegating to the
    single-model or model-set helper as appropriate.
    """
    axis = kwargs.get('model_set_axis', None)
    if len(self) == 1:
        return _prepare_outputs_single_model(outputs, format_info)
    return _prepare_outputs_model_set(self, outputs, format_info, axis)
def copy(self):
    """
    Return a copy of this model.

    Uses a deep copy so that all model attributes, including parameter
    values, are copied as well.
    """
    # Bind the stdlib module locally under an alias so this method does not
    # depend on the name ``copy`` resolving to the module in its scope.
    import copy as _copy
    return _copy.deepcopy(self)
def deepcopy(self):
    """
    Return a deep copy of this model.
    """
    # Local alias keeps this independent of how ``copy`` resolves in scope.
    import copy as _copy
    return _copy.deepcopy(self)
@sharedmethod
def rename(self, name):
    """
    Return a copy of this model with a new name.
    """
    renamed = self.copy()
    renamed._name = name
    return renamed
def coerce_units(
    self,
    input_units=None,
    return_units=None,
    input_units_equivalencies=None,
    input_units_allow_dimensionless=False
):
    """
    Attach units to this (unitless) model.

    Parameters
    ----------
    input_units : dict or tuple, optional
        Input units to attach. If dict, each key is the name of a model input,
        and the value is the unit to attach. If tuple, the elements are units
        to attach in order corresponding to `Model.inputs`.
    return_units : dict or tuple, optional
        Output units to attach. If dict, each key is the name of a model output,
        and the value is the unit to attach. If tuple, the elements are units
        to attach in order corresponding to `Model.outputs`.
    input_units_equivalencies : dict, optional
        Default equivalencies to apply to input values. If set, this should be a
        dictionary where each key is a string that corresponds to one of the
        model inputs.
    input_units_allow_dimensionless : bool or dict, optional
        Allow dimensionless input. If this is True, input values to evaluate will
        gain the units specified in input_units. If this is a dictionary then it
        should map input name to a bool to allow dimensionless numbers for that
        input.

    Returns
    -------
    `CompoundModel`
        A `CompoundModel` composed of the current model plus
        `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.

    Raises
    ------
    ValueError
        If the current model already has units.

    Examples
    --------
    Wrapping a unitless model to require and convert units:

    >>> from astropy.modeling.models import Polynomial1D
    >>> from astropy import units as u
    >>> poly = Polynomial1D(1, c0=1, c1=2)
    >>> model = poly.coerce_units((u.m,), (u.s,))
    >>> model(u.Quantity(10, u.m))  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    >>> model(u.Quantity(1000, u.cm))  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    >>> model(u.Quantity(10, u.cm))  # doctest: +FLOAT_CMP
    <Quantity 1.2 s>

    Wrapping a unitless model but still permitting unitless input:

    >>> from astropy.modeling.models import Polynomial1D
    >>> from astropy import units as u
    >>> poly = Polynomial1D(1, c0=1, c1=2)
    >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
    >>> model(u.Quantity(10, u.m))  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    >>> model(10)  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    """
    from .mappings import UnitsMapping
    result = self
    if input_units is not None:
        if self.input_units is not None:
            model_units = self.input_units
        else:
            model_units = {}
        # Refuse to layer units on top of existing (non-dimensionless) ones.
        for unit in [model_units.get(i) for i in self.inputs]:
            if unit is not None and unit != dimensionless_unscaled:
                raise ValueError("Cannot specify input_units for model with existing input units")
        if isinstance(input_units, dict):
            if input_units.keys() != set(self.inputs):
                message = (
                    f"""input_units keys ({", ".join(input_units.keys())}) """
                    f"""do not match model inputs ({", ".join(self.inputs)})"""
                )
                raise ValueError(message)
            # Normalize to a list ordered like self.inputs.
            input_units = [input_units[i] for i in self.inputs]
        if len(input_units) != self.n_inputs:
            message = (
                "input_units length does not match n_inputs: "
                f"expected {self.n_inputs}, received {len(input_units)}"
            )
            raise ValueError(message)
        mapping = tuple((unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units))
        input_mapping = UnitsMapping(
            mapping,
            input_units_equivalencies=input_units_equivalencies,
            input_units_allow_dimensionless=input_units_allow_dimensionless
        )
        input_mapping.inputs = self.inputs
        input_mapping.outputs = self.inputs
        # Prepend the unit-attaching mapping to this model.
        result = input_mapping | result
    if return_units is not None:
        if self.return_units is not None:
            model_units = self.return_units
        else:
            model_units = {}
        for unit in [model_units.get(i) for i in self.outputs]:
            if unit is not None and unit != dimensionless_unscaled:
                raise ValueError("Cannot specify return_units for model with existing output units")
        if isinstance(return_units, dict):
            if return_units.keys() != set(self.outputs):
                message = (
                    f"""return_units keys ({", ".join(return_units.keys())}) """
                    f"""do not match model outputs ({", ".join(self.outputs)})"""
                )
                raise ValueError(message)
            return_units = [return_units[i] for i in self.outputs]
        if len(return_units) != self.n_outputs:
            message = (
                "return_units length does not match n_outputs: "
                f"expected {self.n_outputs}, received {len(return_units)}"
            )
            raise ValueError(message)
        mapping = tuple((model_units.get(i), unit) for i, unit in zip(self.outputs, return_units))
        return_mapping = UnitsMapping(mapping)
        return_mapping.inputs = self.outputs
        return_mapping.outputs = self.outputs
        # Append the output unit mapping after this model.
        result = result | return_mapping
    return result
@property
def n_submodels(self):
    """
    Return the number of components in a single model, which is
    obviously 1.
    """
    count = 1
    return count
def _initialize_constraints(self, kwargs):
    """
    Pop parameter constraint values off the keyword arguments passed to
    `Model.__init__` and store them in private instance attributes.
    """
    # Per-parameter constraints are attached directly to the corresponding
    # parameter objects.
    for cname in self.parameter_constraints:
        for pname, cvalue in kwargs.pop(cname, {}).items():
            setattr(getattr(self, pname), cname, cvalue)
    # Model-level constraints are collected into a single dict.
    self._mconstraints = {
        cname: kwargs.pop(cname, []) for cname in self.model_constraints
    }
def _initialize_parameters(self, args, kwargs):
    """
    Initialize the _parameters array that stores raw parameter values for
    all parameter sets for use with vectorized fitting algorithms; on
    FittableModels the _param_name attributes actually just reference
    slices of this array.
    """
    n_models = kwargs.pop('n_models', None)
    if not (n_models is None or
            (isinstance(n_models, (int, np.integer)) and n_models >= 1)):
        raise ValueError(
            "n_models must be either None (in which case it is "
            "determined from the model_set_axis of the parameter initial "
            "values) or it must be a positive integer "
            "(got {0!r})".format(n_models))
    model_set_axis = kwargs.pop('model_set_axis', None)
    if model_set_axis is None:
        if n_models is not None and n_models > 1:
            # Default to zero
            model_set_axis = 0
        else:
            # Otherwise disable
            model_set_axis = False
    else:
        # model_set_axis must be False or a (non-bool) integer; note that
        # bool is a subclass of int, hence the explicit exclusion.
        if not (model_set_axis is False or
                (isinstance(model_set_axis, int) and
                 not isinstance(model_set_axis, bool))):
            raise ValueError(
                "model_set_axis must be either False or an integer "
                "specifying the parameter array axis to map to each "
                "model in a set of models (got {0!r}).".format(
                    model_set_axis))
    # Process positional arguments by matching them up with the
    # corresponding parameters in self.param_names--if any also appear as
    # keyword arguments this presents a conflict
    params = set()
    if len(args) > len(self.param_names):
        raise TypeError(
            "{0}.__init__() takes at most {1} positional arguments ({2} "
            "given)".format(self.__class__.__name__, len(self.param_names),
                            len(args)))
    self._model_set_axis = model_set_axis
    self._param_metrics = defaultdict(dict)
    for idx, arg in enumerate(args):
        if arg is None:
            # A value of None implies using the default value, if exists
            continue
        # We use quantity_asanyarray here instead of np.asanyarray because
        # if any of the arguments are quantities, we need to return a
        # Quantity object not a plain Numpy array.
        param_name = self.param_names[idx]
        params.add(param_name)
        if not isinstance(arg, Parameter):
            value = quantity_asanyarray(arg, dtype=float)
        else:
            value = arg
        self._initialize_parameter_value(param_name, value)
    # At this point the only remaining keyword arguments should be
    # parameter names; any others are in error.
    for param_name in self.param_names:
        if param_name in kwargs:
            if param_name in params:
                raise TypeError(
                    "{0}.__init__() got multiple values for parameter "
                    "{1!r}".format(self.__class__.__name__, param_name))
            value = kwargs.pop(param_name)
            if value is None:
                continue
            # We use quantity_asanyarray here instead of np.asanyarray
            # because if any of the arguments are quantities, we need
            # to return a Quantity object not a plain Numpy array.
            value = quantity_asanyarray(value, dtype=float)
            params.add(param_name)
            self._initialize_parameter_value(param_name, value)
    # Now deal with case where param_name is not supplied by args or kwargs
    for param_name in self.param_names:
        if param_name not in params:
            self._initialize_parameter_value(param_name, None)
    if kwargs:
        # If any keyword arguments were left over at this point they are
        # invalid--the base class should only be passed the parameter
        # values, constraints, and param_dim
        for kwarg in kwargs:
            # Just raise an error on the first unrecognized argument
            raise TypeError(
                '{0}.__init__() got an unrecognized parameter '
                '{1!r}'.format(self.__class__.__name__, kwarg))
    # Determine the number of model sets: If the model_set_axis is
    # None then there is just one parameter set; otherwise it is determined
    # by the size of that axis on the first parameter--if the other
    # parameters don't have the right number of axes or the sizes of their
    # model_set_axis don't match an error is raised
    if model_set_axis is not False and n_models != 1 and params:
        max_ndim = 0
        if model_set_axis < 0:
            min_ndim = abs(model_set_axis)
        else:
            min_ndim = model_set_axis + 1
        for name in self.param_names:
            value = getattr(self, name)
            param_ndim = np.ndim(value)
            if param_ndim < min_ndim:
                raise InputParameterError(
                    "All parameter values must be arrays of dimension "
                    "at least {0} for model_set_axis={1} (the value "
                    "given for {2!r} is only {3}-dimensional)".format(
                        min_ndim, model_set_axis, name, param_ndim))
            max_ndim = max(max_ndim, param_ndim)
            if n_models is None:
                # Use the dimensions of the first parameter to determine
                # the number of model sets
                n_models = value.shape[model_set_axis]
            elif value.shape[model_set_axis] != n_models:
                raise InputParameterError(
                    "Inconsistent dimensions for parameter {0!r} for "
                    "{1} model sets. The length of axis {2} must be the "
                    "same for all input parameter values".format(
                        name, n_models, model_set_axis))
        self._check_param_broadcast(max_ndim)
    else:
        if n_models is None:
            n_models = 1
        self._check_param_broadcast(None)
    self._n_models = n_models
    # now validate parameters
    for name in params:
        param = getattr(self, name)
        if param._validator is not None:
            param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
    """Mostly deals with consistency checks and determining unit issues."""
    if isinstance(value, Parameter):
        # A ready-made Parameter object is stored on the instance directly.
        self.__dict__[param_name] = value
        return
    param = getattr(self, param_name)
    # Use default if value is not provided
    if value is None:
        default = param.default
        if default is None:
            # No value was supplied for the parameter and the
            # parameter does not have a default, therefore the model
            # is underspecified
            raise TypeError("{0}.__init__() requires a value for parameter "
                            "{1!r}".format(self.__class__.__name__, param_name))
        value = default
        unit = param.unit
    else:
        if isinstance(value, Quantity):
            unit = value.unit
            value = value.value
        else:
            unit = None
    if unit is None and param.unit is not None:
        # A unit-bearing parameter must be initialized with a Quantity.
        raise InputParameterError(
            "{0}.__init__() requires a Quantity for parameter "
            "{1!r}".format(self.__class__.__name__, param_name))
    param._unit = unit
    param.internal_unit = None
    if param._setter is not None:
        # The parameter defines a setter transform; run the value through
        # it and record the internal (post-transform) value and unit.
        if unit is not None:
            _val = param._setter(value * unit)
        else:
            _val = param._setter(value)
        if isinstance(_val, Quantity):
            param.internal_unit = _val.unit
            param._internal_value = np.array(_val.value)
        else:
            param.internal_unit = None
            param._internal_value = np.array(_val)
    else:
        param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
param_metrics[name]['size'] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]['slice']]
value.shape = param_metrics[name]['shape']
param.value = value
def _check_param_broadcast(self, max_ndim):
    """
    This subroutine checks that all parameter arrays can be broadcast
    against each other, and determines the shapes parameters must have in
    order to broadcast correctly.

    If model_set_axis is None this merely checks that the parameters
    broadcast and returns an empty dict if so. This mode is only used for
    single model sets.
    """
    all_shapes = []
    model_set_axis = self._model_set_axis
    for name in self.param_names:
        param = getattr(self, name)
        value = param.value
        param_shape = np.shape(value)
        param_ndim = len(param_shape)
        if max_ndim is not None and param_ndim < max_ndim:
            # All arrays have the same number of dimensions up to the
            # model_set_axis dimension, but after that they may have a
            # different number of trailing axes. The number of trailing
            # axes must be extended for mutual compatibility. For example
            # if max_ndim = 3 and model_set_axis = 0, an array with the
            # shape (2, 2) must be extended to (2, 1, 2). However, an
            # array with shape (2,) is extended to (2, 1).
            new_axes = (1,) * (max_ndim - param_ndim)
            if model_set_axis < 0:
                # Just need to prepend axes to make up the difference
                broadcast_shape = new_axes + param_shape
            else:
                # Insert the length-1 axes just after the model-set axis
                # so per-model data dimensions line up.
                broadcast_shape = (param_shape[:model_set_axis + 1] +
                                   new_axes +
                                   param_shape[model_set_axis + 1:])
            self._param_metrics[name]['broadcast_shape'] = broadcast_shape
            all_shapes.append(broadcast_shape)
        else:
            all_shapes.append(param_shape)
    # Now check mutual broadcastability of all shapes
    try:
        check_broadcast(*all_shapes)
    except IncompatibleShapeError as exc:
        # Translate the shape indices back into parameter names for a
        # user-facing error message.
        shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
        param_a = self.param_names[shape_a_idx]
        param_b = self.param_names[shape_b_idx]
        raise InputParameterError(
            "Parameter {0!r} of shape {1!r} cannot be broadcast with "
            "parameter {2!r} of shape {3!r}. All parameter arrays "
            "must have shapes that are mutually compatible according "
            "to the broadcasting rules.".format(param_a, shape_a,
                                                param_b, shape_b))
def _param_sets(self, raw=False, units=False):
    """
    Implementation of the Model.param_sets property.

    This internal implementation has a ``raw`` argument which controls
    whether or not to return the raw parameter values (i.e. the values that
    are actually stored in the ._parameters array, as opposed to the values
    displayed to users. In most cases these are one in the same but there
    are currently a few exceptions.

    Note: This is notably an overcomplicated device and may be removed
    entirely in the near future.
    """
    values = []
    shapes = []
    for name in self.param_names:
        param = getattr(self, name)
        # ``raw`` uses the internal (post-setter) value when one exists.
        if raw and param._setter:
            value = param._internal_value
        else:
            value = param.value
        broadcast_shape = self._param_metrics[name].get('broadcast_shape')
        if broadcast_shape is not None:
            value = value.reshape(broadcast_shape)
        shapes.append(np.shape(value))
        if len(self) == 1:
            # Add a single param set axis to the parameter's value (thus
            # converting scalars to shape (1,) array values) for
            # consistency
            value = np.array([value])
        if units:
            # Prefer the internal unit for raw values when available.
            if raw and param.internal_unit is not None:
                unit = param.internal_unit
            else:
                unit = param.unit
            if unit is not None:
                value = Quantity(value, unit)
        values.append(value)
    if len(set(shapes)) != 1 or units:
        # If the parameters are not all the same shape, converting to an
        # array is going to produce an object array
        # However the way Numpy creates object arrays is tricky in that it
        # will recurse into array objects in the list and break them up
        # into separate objects. Doing things this way ensures a 1-D
        # object array the elements of which are the individual parameter
        # arrays. There's not much reason to do this over returning a list
        # except for consistency
        psets = np.empty(len(values), dtype=object)
        psets[:] = values
        return psets
    return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
"{0}={1}".format(name,
param_repr_oneline(getattr(self, name)))
for name in self.param_names)
if self.name is not None:
parts.append('name={0!r}'.format(self.name))
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append('{0}={1!r}'.format(kwarg, value))
if len(self) > 1:
parts.append("n_models={0}".format(len(self)))
return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords
if value is not None]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append('{0}: {1}'.format(keyword, value))
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
    """
    Base class for models that can be fitted using the built-in fitting
    algorithms.
    """

    # Subclasses set this True when the model is linear in its parameters.
    linear = False
    # derivative with respect to parameters
    fit_deriv = None
    """
    Function (similar to the model's `~Model.evaluate`) to compute the
    derivatives of the model with respect to its parameters, for use by fitting
    algorithms. In other words, this computes the Jacobian matrix with respect
    to the model's parameters.
    """
    # Flag that indicates if the model derivatives with respect to parameters
    # are given in columns or rows
    col_fit_deriv = True
    fittable = True
class Fittable1DModel(FittableModel):
    """
    Base class for one-dimensional fittable models.

    This class provides an easier interface to defining new models.
    Examples can be found in `astropy.modeling.functional_models`.
    """

    # One independent variable in, one dependent variable out.
    n_inputs = 1
    n_outputs = 1
    _separable = True
class Fittable2DModel(FittableModel):
    """
    Base class for two-dimensional fittable models.

    This class provides an easier interface to defining new models.
    Examples can be found in `astropy.modeling.functional_models`.
    """

    # Two independent variables in, one dependent variable out.
    n_inputs = 2
    n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
# Lookup table mapping each binary expression operator symbol to the function
# that combines two (evaluator, n_inputs, n_outputs) operand tuples under it.
BINARY_OPERATORS = {
    '+': _make_arithmetic_operator(operator.add),
    '-': _make_arithmetic_operator(operator.sub),
    '*': _make_arithmetic_operator(operator.mul),
    '/': _make_arithmetic_operator(operator.truediv),
    '**': _make_arithmetic_operator(operator.pow),
    '|': _composition_operator,
    '&': _join_operator
}
SPECIAL_OPERATORS = {}
def _add_special_operator(sop_name, sop):
SPECIAL_OPERATORS[sop_name] = sop
class CompoundModel(Model):
    '''
    Base class for compound models.

    While it can be used directly, the recommended way
    to combine models is through the model operators.
    '''

    def __init__(self, op, left, right, name=None, inverse=None):
        # Written into __dict__ directly so __getattr__ (which consults
        # _param_names) cannot recurse before initialization finishes.
        self.__dict__['_param_names'] = None
        self._n_submodels = None
        self.op = op
        self.left = left
        self.right = right
        self._bounding_box = None
        self._user_bounding_box = None
        self._leaflist = None
        self._opset = None
        self._tdict = None
        self._parameters = None
        self._parameters_ = None
        self._param_metrics = None
        self._has_inverse = False  # may be set to True in following code
        if inverse:
            self.inverse = inverse
        if op != 'fix_inputs' and len(left) != len(right):
            raise ValueError(
                'Both operands must have equal values for n_models')
        self._n_models = len(left)
        if op != 'fix_inputs' and ((left.model_set_axis != right.model_set_axis)
                                   or left.model_set_axis):  # not False and not 0
            raise ValueError("model_set_axis must be False or 0 and consistent for operands")
        self._model_set_axis = left.model_set_axis
        if op in ['+', '-', '*', '/', '**'] or op in SPECIAL_OPERATORS:
            # Elementwise operators require operands with matching arity.
            if (left.n_inputs != right.n_inputs) or \
               (left.n_outputs != right.n_outputs):
                raise ModelDefinitionError(
                    'Both operands must match numbers of inputs and outputs')
            self.n_inputs = left.n_inputs
            self.n_outputs = left.n_outputs
            self.inputs = left.inputs
            self.outputs = left.outputs
        elif op == '&':
            # Join: inputs/outputs of both operands are concatenated.
            self.n_inputs = left.n_inputs + right.n_inputs
            self.n_outputs = left.n_outputs + right.n_outputs
            self.inputs = combine_labels(left.inputs, right.inputs)
            self.outputs = combine_labels(left.outputs, right.outputs)
            if inverse is None and self.both_inverses_exist():
                self._has_inverse = True
                # The inverse of a join is the join of the inverses.
                inv = CompoundModel('&',
                                    self.left.inverse,
                                    self.right.inverse,
                                    inverse=self)
                if left._user_inverse is not None or right._user_inverse is not None:
                    self._user_inverse = inv
                    # Break the back-reference so the inverse of the
                    # inverse is not itself treated as user-assigned.
                    if self.inverse._has_inverse:
                        del self._user_inverse._user_inverse
                        self.inverse._has_inverse = False
                else:
                    self._inverse = inv
        elif op == '|':
            if left.n_outputs != right.n_inputs:
                raise ModelDefinitionError(
                    "Unsupported operands for |: {0} (n_inputs={1}, "
                    "n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); "
                    "n_outputs for the left-hand model must match n_inputs "
                    "for the right-hand model.".format(
                        left.name, left.n_inputs, left.n_outputs, right.name,
                        right.n_inputs, right.n_outputs))
            self.n_inputs = left.n_inputs
            self.n_outputs = right.n_outputs
            self.inputs = left.inputs
            self.outputs = right.outputs
            if inverse is None and self.both_inverses_exist():
                self._has_inverse = True
                # The inverse of a composition applies the operand
                # inverses in reverse order.
                inv = CompoundModel('|',
                                    self.right.inverse,
                                    self.left.inverse,
                                    inverse=self)
                if left._user_inverse is not None or right._user_inverse is not None:
                    self._user_inverse = inv
                    if self.inverse._has_inverse:
                        del self._user_inverse._user_inverse
                        self.inverse._has_inverse = False
                else:
                    self._inverse = inv
        elif op == 'fix_inputs':
            if not isinstance(left, Model):
                raise ValueError('First argument to "fix_inputs" must be an instance of an astropy Model.')
            if not isinstance(right, dict):
                raise ValueError('Expected a dictionary for second argument of "fix_inputs".')
            # Dict keys must match either possible indices
            # for model on left side, or names for inputs.
            self.n_inputs = left.n_inputs - len(right)
            # Assign directly to the private attribute (instead of using the setter)
            # to avoid asserting the new number of outputs matches the old one.
            self._outputs = left.outputs
            self.n_outputs = left.n_outputs
            newinputs = list(left.inputs)
            keys = right.keys()
            input_ind = []
            for key in keys:
                if isinstance(key, int):
                    if key >= left.n_inputs or key < 0:
                        raise ValueError(
                            'Substitution key integer value '
                            'not among possible input choices.')
                    if key in input_ind:
                        raise ValueError("Duplicate specification of "
                                         "same input (index/name).")
                    input_ind.append(key)
                elif isinstance(key, str):
                    if key not in left.inputs:
                        raise ValueError(
                            'Substitution key string not among possible '
                            'input choices.')
                    # Check to see it doesn't match positional
                    # specification.
                    ind = left.inputs.index(key)
                    if ind in input_ind:
                        raise ValueError("Duplicate specification of "
                                         "same input (index/name).")
                    input_ind.append(ind)
            # Remove substituted inputs
            input_ind.sort()
            input_ind.reverse()
            for ind in input_ind:
                del newinputs[ind]
            self.inputs = tuple(newinputs)
            # Now check to see if the input model has bounding_box defined.
            # If so, remove the appropriate dimensions and set it for this
            # instance.
            try:
                bounding_box = self.left.bounding_box
                self._fix_input_bounding_box(input_ind)
            except NotImplementedError:
                pass
        else:
            # NOTE(review): two positional args here make the exception
            # message render as a tuple -- possibly unintended.
            raise ModelDefinitionError('Illegal operator: ', self.op)
        self.name = name
        self._fittable = None
        self.fit_deriv = None
        self.col_fit_deriv = None
        if op in ('|', '+', '-'):
            # Composition and +/- preserve linearity of linear operands.
            self.linear = left.linear and right.linear
        else:
            self.linear = False
        self.eqcons = []
        self.ineqcons = []
        self._map_parameters()
def evaluate(self, *args, **kwargs):
    """No-op placeholder; compound evaluation happens in ``_evaluate``."""
    pass
@property
def n_submodels(self):
    """Number of leaf models contained in this compound model."""
    if self._leaflist is None:
        self._make_leaflist()
    return len(self._leaflist)


@property
def submodel_names(self):
    """ Return the names of submodels in a ``CompoundModel``."""
    if self._leaflist is None:
        self._make_leaflist()
    anon_count = 0
    labels = []
    for leaf in self._leaflist:
        if leaf.name is None:
            # Unnamed submodels get a unique placeholder label.
            labels.append('None_{}'.format(anon_count))
            anon_count += 1
        else:
            labels.append(leaf.name)
    return tuple(labels)
def both_inverses_exist(self):
    '''
    if both members of this compound model have inverses return True
    '''
    try:
        # Accessing .inverse raises NotImplementedError when undefined.
        linv = self.left.inverse
        rinv = self.right.inverse
    except NotImplementedError:
        return False
    # For compound operands also require an explicit has_inverse();
    # NOTE(review): this looks redundant with the property access above
    # for CompoundModel operands -- confirm whether it guards a user-set
    # inverse case.
    if isinstance(self.left, CompoundModel):
        if not self.left.has_inverse():
            return False
    if isinstance(self.right, CompoundModel):
        if not self.right.has_inverse():
            return False
    return True
def __call__(self, *args, **kw):
    """Evaluate the compound model, optionally honoring its bounding box."""
    # Turn any keyword arguments into positional arguments.
    args, kw = self._get_renamed_inputs_as_positional(*args, **kw)
    # If equivalencies are provided, necessary to map parameters and pass
    # the leaflist as a keyword input for use by model evaluation so that
    # the compound model input names can be matched to the model input
    # names.
    if 'equivalencies' in kw:
        # Restructure to be useful for the individual model lookup
        kw['inputs_map'] = [(value[0], (value[1], key)) for
                            key, value in self.inputs_map().items()]
    with_bbox = kw.pop('with_bounding_box', False)
    fill_value = kw.pop('fill_value', np.nan)
    # Use of bounding box for compound models requires special treatment
    # in selecting only valid inputs to pass along to constituent models.
    bbox = get_bounding_box(self)
    if with_bbox and bbox is not None:
        # first check inputs are consistent in shape
        input_shape = _validate_input_shapes(args, (), self._n_models,
                                             self.model_set_axis, self.standard_broadcasting)
        vinputs, valid_ind, allout = prepare_bounding_box_inputs(self, input_shape, args, bbox)
        if not allout:
            # Evaluate only the in-box points, then scatter the results
            # back into a full-shape output filled with fill_value.
            valid_result = self._evaluate(*vinputs, **kw)
            if self.n_outputs == 1:
                valid_result = [valid_result]
            outputs = prepare_bounding_box_outputs(valid_result, valid_ind,
                                                   input_shape, fill_value)
        else:
            # Every point fell outside the bounding box.
            outputs = [np.zeros(input_shape) + fill_value for i in range(self.n_outputs)]
        if self.n_outputs == 1:
            return outputs[0]
        return outputs
    else:
        return self._evaluate(*args, **kw)
def _evaluate(self, *args, **kw):
op = self.op
if op != 'fix_inputs':
if op != '&':
leftval = self.left(*args, **kw)
if op != '|':
rightval = self.right(*args, **kw)
else:
leftval = self.left(*(args[:self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs:]), **kw)
if op == '+':
return binary_operation(operator.add, leftval, rightval)
elif op == '-':
return binary_operation(operator.sub, leftval, rightval)
elif op == '*':
return binary_operation(operator.mul, leftval, rightval)
elif op == '/':
return binary_operation(operator.truediv, leftval, rightval)
elif op == '**':
return binary_operation(operator.pow, leftval, rightval)
elif op == '&':
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op == '|':
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
raise ModelDefinitionError('Unrecognized operator {op}')
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if isinstance(key, int):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError("Keyword argument duplicates "
"positional value supplied.")
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
    """Ordered names of the parameters mapped onto this compound model."""
    return self._param_names
def _make_leaflist(self):
    # Build and cache the flat list of leaf models plus the subtree
    # dictionary used for indexing/slicing into the expression tree.
    tdict = {}
    leaflist = []
    make_subtree_dict(self, '', tdict, leaflist)
    self._leaflist = leaflist
    self._tdict = tdict


def _make_opset(self):
    """ Determine the set of operations used in this tree."""
    self._opset = set()
    get_ops(self, self._opset)
def __getattr__(self, name):
    """
    If someone accesses an attribute not already defined, map the
    parameters, and then see if the requested attribute is one of
    the parameters
    """
    # The following test is needed to avoid infinite recursion
    # caused by deepcopy. There may be other such cases discovered.
    if name == '__setstate__':
        raise AttributeError
    # NOTE(review): _param_names is None until _map_parameters() runs;
    # `name in None` would raise TypeError rather than AttributeError --
    # confirm attribute misses cannot occur before parameter mapping.
    if name in self._param_names:
        return self.__dict__[name]
    else:
        raise AttributeError('Attribute "{}" not found'.format(name))
def __getitem__(self, index):
    """Index into the expression tree.

    An integer or model-name string returns the matching leaf model; a
    slice returns the existing subtree spanning those leaves.
    """
    if self._leaflist is None:
        self._make_leaflist()
    leaflist = self._leaflist
    tdict = self._tdict
    if isinstance(index, slice):
        if index.step:
            raise ValueError('Steps in slices not supported '
                             'for compound models')
        # Normalize start/stop to integer leaf indices (names allowed).
        if index.start is not None:
            if isinstance(index.start, str):
                start = self._str_index_to_int(index.start)
            else:
                start = index.start
        else:
            start = 0
        if index.stop is not None:
            if isinstance(index.stop, str):
                stop = self._str_index_to_int(index.stop)
            else:
                # Integer stop is exclusive; subtree bounds are inclusive.
                stop = index.stop - 1
        else:
            stop = len(leaflist) - 1
        if index.stop == 0:
            raise ValueError("Slice endpoint cannot be 0")
        if start < 0:
            start = len(leaflist) + start
        if stop < 0:
            stop = len(leaflist) + stop
        # now search for matching node:
        if stop == start:  # only single value, get leaf instead in code below
            index = start
        else:
            for key in tdict:
                node, leftind, rightind = tdict[key]
                if leftind == start and rightind == stop:
                    return node
            raise IndexError("No appropriate subtree matches slice")
    if isinstance(index, type(0)):
        return leaflist[index]
    elif isinstance(index, type('')):
        return leaflist[self._str_index_to_int(index)]
    else:
        raise TypeError('index must be integer, slice, or model name string')
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, 'name', None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError("No component with name '{}' found".format(str_index))
if len(found) > 1:
raise IndexError("Multiple components found using '{}' as name\n"
"at indices {}".format(str_index, found))
return found[0]
@property
def n_inputs(self):
    """ The number of inputs of a model."""
    return self._n_inputs


@n_inputs.setter
def n_inputs(self, count):
    self._n_inputs = count


@property
def n_outputs(self):
    """ The number of outputs of a model."""
    return self._n_outputs


@n_outputs.setter
def n_outputs(self, count):
    self._n_outputs = count


@property
def eqcons(self):
    """Value stored for the ``eqcons`` constraint attribute."""
    return self._eqcons


@eqcons.setter
def eqcons(self, constraints):
    self._eqcons = constraints
@property
def ineqcons(self):
    """Value stored for the ``ineqcons`` constraint attribute.

    BUG FIX: this property previously read and wrote ``self._eqcons``
    (a copy/paste slip), silently aliasing ``ineqcons`` to ``eqcons``.
    It now uses its own ``_ineqcons`` backing attribute, mirroring the
    ``eqcons`` property.
    """
    return self._ineqcons


@ineqcons.setter
def ineqcons(self, constraints):
    self._ineqcons = constraints
def traverse_postorder(self, include_operator=False):
    """ Postorder traversal of the CompoundModel tree."""
    def expand(side):
        # Recurse into compound children; wrap leaves in a list.
        if isinstance(side, CompoundModel):
            return side.traverse_postorder(include_operator)
        return [side]

    result = expand(self.left) + expand(self.right)
    # The node itself (or just its operator symbol) comes last.
    result.append(self.op if include_operator else self)
    return result
def _format_expression(self, format_leaf=None):
    # Render the expression tree as an infix string, parenthesizing a
    # child only when its operator binds more loosely than its parent's.
    leaf_idx = 0
    operands = deque()
    if format_leaf is None:
        format_leaf = lambda i, l: '[{0}]'.format(i)
    for node in self.traverse_postorder():
        if not isinstance(node, CompoundModel):
            operands.append(format_leaf(leaf_idx, node))
            leaf_idx += 1
            continue
        oper_order = OPERATOR_PRECEDENCE[node.op]
        right = operands.pop()
        left = operands.pop()
        if isinstance(node, CompoundModel):
            if (isinstance(node.left, CompoundModel) and
                    OPERATOR_PRECEDENCE[node.left.op] < oper_order):
                left = '({0})'.format(left)
            if (isinstance(node.right, CompoundModel) and
                    OPERATOR_PRECEDENCE[node.right.op] < oper_order):
                right = '({0})'.format(right)
        operands.append(' '.join((left, node.op, right)))
    return ''.join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return '\n\n'.join('[{0}]: {1!r}'.format(idx, m)
for idx, m in enumerate(self._leaflist))
def __str__(self):
    # Combine the infix expression with per-component reprs, then defer
    # to the shared Model formatting for the final layout.
    expression = self._format_expression()
    components = self._format_components()
    keywords = [
        ('Expression', expression),
        ('Components', '\n' + indent(components))
    ]
    return super()._format_str(keywords=keywords)
def rename(self, name):
    """Set this model's name and return the model itself (fluent style)."""
    self.name = name
    return self
@property
def isleaf(self):
    """Compound models are never leaves of an expression tree."""
    return False


def has_inverse(self):
    """Whether an inverse (computed or user-assigned) is available."""
    return self._has_inverse
@property
def inverse(self):
    """The inverse model, preferring a user-assigned one when present.

    Raises
    ------
    NotImplementedError
        If no inverse has been computed or assigned.
    """
    if self.has_inverse():
        if self._user_inverse is not None:
            return self._user_inverse
        return self._inverse
    else:
        raise NotImplementedError("Inverse function not provided")


@inverse.setter
def inverse(self, value):
    # Only Model instances may serve as an inverse.
    if not isinstance(value, Model):
        raise ValueError("Attempt to assign non model to inverse")
    self._user_inverse = value
    self._has_inverse = True


@inverse.deleter
def inverse(self):
    # Clears the user-assigned inverse and the availability flag.
    self._has_inverse = False
    self._user_inverse = None
@property
def fittable(self):
    """ Set the fittable attribute on a compound model."""
    if self._fittable is None:
        if self._leaflist is None:
            self._map_parameters()
        # Fittable only when every leaf model is fittable; result cached.
        self._fittable = all(leaf.fittable for leaf in self._leaflist)
    return self._fittable
# Wire the Python arithmetic/composition operators to compound-model
# construction via the shared _model_oper factory.
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
def _map_parameters(self):
    """
    Map all the constituent model parameters to the compound object,
    renaming as necessary by appending a suffix number.

    This can be an expensive operation, particularly for a complex
    expression tree.

    All the corresponding parameter attributes are created that one
    expects for the Model class.

    The parameter objects that the attributes point to are the same
    objects as in the constituent models. Changes made to parameter
    values to either are seen by both.

    Prior to calling this, none of the associated attributes will
    exist. This method must be called to make the model usable by
    fitting engines.

    If oldnames=True, then parameters are named as in the original
    implementation of compound models.
    """
    if self._parameters is not None:
        # do nothing
        return
    if self._leaflist is None:
        self._make_leaflist()
    self._parameters_ = OrderedDict()
    param_map = {}
    self._param_names = []
    for lindex, leaf in enumerate(self._leaflist):
        # NOTE(review): dict leaves (presumably from 'fix_inputs'
        # substitutions) carry no parameters and are skipped -- confirm.
        if not isinstance(leaf, dict):
            for param_name in leaf.param_names:
                param = getattr(leaf, param_name)
                # Suffix with the leaf index to keep names unique.
                new_param_name = "{}_{}".format(param_name, lindex)
                self.__dict__[new_param_name] = param
                self._parameters_[new_param_name] = param
                self._param_names.append(new_param_name)
                param_map[new_param_name] = (lindex, param_name)
    self._param_metrics = {}
    self._param_map = param_map
    self._param_map_inverse = dict((v, k) for k, v in param_map.items())
    self._initialize_slices()
    self._param_names = tuple(self._param_names)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
param_metrics[name]['size'] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
    """Resolve *key* through *adict* for compound branches; leaf branches
    pass through unchanged as ``(branch, key)``."""
    if not isinstance(branch, CompoundModel):
        return branch, key
    return adict[key]
def inputs_map(self):
    """
    Map the names of the inputs to this ExpressionTree to the inputs to the leaf models.
    """
    inputs_map = {}
    if not isinstance(self.op, str):  # If we don't have an operator the mapping is trivial
        return {inp: (self, inp) for inp in self.inputs}
    elif self.op == '|':
        # Composition: all compound inputs feed the left operand.
        if isinstance(self.left, CompoundModel):
            l_inputs_map = self.left.inputs_map()
        for inp in self.inputs:
            if isinstance(self.left, CompoundModel):
                inputs_map[inp] = l_inputs_map[inp]
            else:
                inputs_map[inp] = self.left, inp
    elif self.op == '&':
        # Join: the first left.n_inputs inputs go left, the rest right.
        if isinstance(self.left, CompoundModel):
            l_inputs_map = self.left.inputs_map()
        if isinstance(self.right, CompoundModel):
            r_inputs_map = self.right.inputs_map()
        for i, inp in enumerate(self.inputs):
            if i < len(self.left.inputs):  # Get from left
                if isinstance(self.left, CompoundModel):
                    inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
                else:
                    inputs_map[inp] = self.left, self.left.inputs[i]
            else:  # Get from right
                if isinstance(self.right, CompoundModel):
                    inputs_map[inp] = r_inputs_map[self.right.inputs[i - len(self.left.inputs)]]
                else:
                    inputs_map[inp] = self.right, self.right.inputs[i - len(self.left.inputs)]
    elif self.op == 'fix_inputs':
        # Only the inputs that were not fixed remain visible.
        fixed_ind = list(self.right.keys())
        ind = [list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind]
        inp_ind = list(range(self.left.n_inputs))
        for i in ind:
            inp_ind.remove(i)
        for i in inp_ind:
            inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
    else:
        # Arithmetic operators: both operands share inputs; map via left.
        if isinstance(self.left, CompoundModel):
            l_inputs_map = self.left.inputs_map()
        for inp in self.left.inputs:
            if isinstance(self.left, CompoundModel):
                inputs_map[inp] = l_inputs_map[inp]
            else:
                inputs_map[inp] = self.left, inp
    return inputs_map
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(input_units, output_units)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
    # Per-input units resolved through the owning leaf model; None when
    # no constituent model declares input units.
    inputs_map = self.inputs_map()
    input_units_dict = {key: inputs_map[key][0].input_units[orig_key]
                        for key, (mod, orig_key) in inputs_map.items()
                        if inputs_map[key][0].input_units is not None}
    if input_units_dict:
        return input_units_dict
    return None


@property
def input_units_equivalencies(self):
    # Per-input unit equivalencies, keyed by compound input name.
    inputs_map = self.inputs_map()
    return {key: inputs_map[key][0].input_units_equivalencies[orig_key]
            for key, (mod, orig_key) in inputs_map.items()
            if inputs_map[key][0].input_units_equivalencies is not None}


@property
def input_units_allow_dimensionless(self):
    # Per-input dimensionless-allowed flags from the owning leaf models.
    inputs_map = self.inputs_map()
    return {key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
            for key, (mod, orig_key) in inputs_map.items()}


@property
def input_units_strict(self):
    # Per-input strict-units flags from the owning leaf models.
    inputs_map = self.inputs_map()
    return {key: inputs_map[key][0].input_units_strict[orig_key]
            for key, (mod, orig_key) in inputs_map.items()}


@property
def return_units(self):
    # Per-output units resolved through the leaf owning each output.
    outputs_map = self.outputs_map()
    return {key: outputs_map[key][0].return_units[orig_key]
            for key, (mod, orig_key) in outputs_map.items()
            if outputs_map[key][0].return_units is not None}
def outputs_map(self):
    """
    Map the names of the outputs to this ExpressionTree to the outputs to the leaf models.
    """
    outputs_map = {}
    if not isinstance(self.op, str):  # If we don't have an operator the mapping is trivial
        return {out: (self, out) for out in self.outputs}
    elif self.op == '|':
        # Composition: every compound output comes from the right operand.
        if isinstance(self.right, CompoundModel):
            r_outputs_map = self.right.outputs_map()
        for out in self.outputs:
            if isinstance(self.right, CompoundModel):
                outputs_map[out] = r_outputs_map[out]
            else:
                outputs_map[out] = self.right, out
    elif self.op == '&':
        # Join: the first left.n_outputs outputs come from the left
        # operand, the remainder from the right.
        if isinstance(self.left, CompoundModel):
            l_outputs_map = self.left.outputs_map()
        if isinstance(self.right, CompoundModel):
            r_outputs_map = self.right.outputs_map()
        for i, out in enumerate(self.outputs):
            if i < len(self.left.outputs):  # Get from left
                if isinstance(self.left, CompoundModel):
                    outputs_map[out] = l_outputs_map[self.left.outputs[i]]
                else:
                    outputs_map[out] = self.left, self.left.outputs[i]
            else:  # Get from right
                if isinstance(self.right, CompoundModel):
                    outputs_map[out] = r_outputs_map[self.right.outputs[i - len(self.left.outputs)]]
                else:
                    outputs_map[out] = self.right, self.right.outputs[i - len(self.left.outputs)]
    elif self.op == 'fix_inputs':
        return self.left.outputs_map()
    else:
        # Arithmetic operators: operands share outputs; map through left.
        if isinstance(self.left, CompoundModel):
            l_outputs_map = self.left.outputs_map()
        for out in self.left.outputs:
            if isinstance(self.left, CompoundModel):
                # BUG FIX: previously ``l_outputs_map()[out]`` -- calling
                # the dict raised TypeError; index it instead (this now
                # mirrors the equivalent branch in ``inputs_map``).
                outputs_map[out] = l_outputs_map[out]
            else:
                outputs_map[out] = self.left, out
    return outputs_map
def _fix_input_bounding_box(self, input_ind):
"""
If the ``fix_inputs`` operator is used and the model it is applied to
has a bounding box definition, delete the corresponding inputs from
that bounding box. This method presumes the bounding_box is not None.
This also presumes that the list of input indices to remove (i.e.,
input_ind has already been put in reverse sorted order).
"""
bounding_box = list(self.left.bounding_box)
for ind in input_ind:
del bounding_box[ind]
if self.n_inputs == 1:
bounding_box = bounding_box[0]
self.bounding_box = bounding_box
@property
def has_user_bounding_box(self):
    """Whether a custom bounding box was assigned by the user.

    True exactly when ``model.bounding_box`` has been explicitly set,
    i.e. the ``_user_bounding_box`` backing attribute is non-None.
    """
    user_box = self._user_bounding_box
    return user_box is not None
def render(self, out=None, coords=None):
    """
    Evaluate a model at fixed positions, respecting the ``bounding_box``.

    The key difference relative to evaluating the model directly is that
    this method is limited to a bounding box if the `Model.bounding_box`
    attribute is set.

    Parameters
    ----------
    out : `numpy.ndarray`, optional
        An array that the evaluated model will be added to.  If this is not
        given (or given as ``None``), a new array will be created.
    coords : array_like, optional
        An array to be used to translate from the model's input coordinates
        to the ``out`` array. It should have the property that
        ``self(coords)`` yields the same shape as ``out``.  If ``out`` is
        not specified, ``coords`` will be used to determine the shape of
        the returned array. If this is not provided (or None), the model
        will be evaluated on a grid determined by `Model.bounding_box`.

    Returns
    -------
    out : `numpy.ndarray`
        The model added to ``out`` if  ``out`` is not ``None``, or else a
        new array from evaluating the model over ``coords``.
        If ``out`` and ``coords`` are both `None`, the returned array is
        limited to the `Model.bounding_box` limits. If
        `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be
        passed.

    Raises
    ------
    ValueError
        If ``coords`` are not given and the `Model.bounding_box` of
        this model is not set.

    Examples
    --------
    :ref:`bounding-boxes`
    """
    # A model without a bounding box raises NotImplementedError from the
    # property; normalize that to None here.
    try:
        bbox = self.bounding_box
    except NotImplementedError:
        bbox = None

    ndim = self.n_inputs

    if (coords is None) and (out is None) and (bbox is None):
        raise ValueError('If no bounding_box is set, '
                         'coords or out must be input.')

    # for consistent indexing
    if ndim == 1:
        if coords is not None:
            coords = [coords]
        if bbox is not None:
            bbox = [bbox]

    if coords is not None:
        coords = np.asanyarray(coords, dtype=float)
        # Check dimensions match out and model
        assert len(coords) == ndim
        if out is not None:
            if coords[0].shape != out.shape:
                raise ValueError('inconsistent shape of the output.')
        else:
            out = np.zeros(coords[0].shape)

    if out is not None:
        out = np.asanyarray(out, dtype=float)
        if out.ndim != ndim:
            raise ValueError('the array and model must have the same '
                             'number of dimensions.')

    if bbox is not None:
        # Assures position is at center pixel, important when using
        # add_array.
        pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
                       for bb in bbox]).astype(int).T
        pos, delta = pd

        if coords is not None:
            sub_shape = tuple(delta * 2 + 1)
            sub_coords = np.array([extract_array(c, sub_shape, pos)
                                   for c in coords])
        else:
            limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
            sub_coords = np.mgrid[limits]

        # Coordinates are generated in python (z, y, x) order; the model
        # expects them in input (x, y, z) order.
        sub_coords = sub_coords[::-1]

        if out is None:
            out = self(*sub_coords)
        else:
            try:
                out = add_array(out, self(*sub_coords), pos)
            except ValueError:
                raise ValueError(
                    'The `bounding_box` is larger than the input out in '
                    'one or more dimensions. Set '
                    '`model.bounding_box = None`.')
    else:
        if coords is None:
            im_shape = out.shape
            limits = [slice(i) for i in im_shape]
            coords = np.mgrid[limits]

        coords = coords[::-1]

        out += self(*coords)

    return out
def replace_submodel(self, name, model):
    """
    Construct a new `~astropy.modeling.CompoundModel` instance from an
    existing CompoundModel, replacing the named submodel with a new model.

    In order to ensure that inverses and names are kept/reconstructed, it's
    necessary to rebuild the CompoundModel from the replaced node all the
    way back to the base. The original CompoundModel is left untouched.

    Parameters
    ----------
    name : str
        name of submodel to be replaced
    model : `~astropy.modeling.Model`
        replacement model

    Raises
    ------
    ValueError
        If no submodel (or more than one) has the given name, or if the
        replacement differs from the old model in n_models, n_inputs, or
        n_outputs.
    """
    submodels = [m for m in self.traverse_postorder()
                 if getattr(m, 'name', None) == name]
    if submodels:
        if len(submodels) > 1:
            raise ValueError(f"More than one submodel named {name}")

        old_model = submodels.pop()
        if len(old_model) != len(model):
            raise ValueError("New and old models must have equal values "
                             "for n_models")

        # Do this check first in order to raise a more helpful Exception,
        # although it would fail trying to construct the new CompoundModel
        if (old_model.n_inputs != model.n_inputs or
                old_model.n_outputs != model.n_outputs):
            raise ValueError("New model must match numbers of inputs and "
                             "outputs of existing model")

        # Walk back up the tree, rebuilding each ancestor node with the
        # replaced child spliced in; ``model`` accumulates the rebuilt
        # subtree at each step.
        tree = _get_submodel_path(self, name)
        while tree:
            branch = self.copy()
            for node in tree[:-1]:
                branch = getattr(branch, node)
            setattr(branch, tree[-1], model)
            model = CompoundModel(branch.op, branch.left, branch.right,
                                  name=branch.name,
                                  inverse=branch._user_inverse)
            tree = tree[:-1]
        return model

    else:
        raise ValueError(f"No submodels found named {name}")
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not)"""
if getattr(model, 'name', None) == name:
return []
try:
return ['left'] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ['right'] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
    '''
    Perform binary operation. Operands may be matching tuples of operands.
    '''
    both_tuples = isinstance(left, tuple) and isinstance(right, tuple)
    if not both_tuples:
        return binoperator(left, right)
    # Apply the operator pairwise across the two operand tuples.
    return tuple([binoperator(lhs, rhs) for lhs, rhs in zip(left, right)])
def get_ops(tree, opset):
    """
    Recursive function to collect operators used.

    Adds every operator found in ``tree`` (a CompoundModel) to the set
    ``opset`` in place; leaves contribute nothing.
    """
    if not isinstance(tree, CompoundModel):
        return
    opset.add(tree.op)
    get_ops(tree.left, opset)
    get_ops(tree.right, opset)
def make_subtree_dict(tree, nodepath, tdict, leaflist):
    '''
    Traverse a tree noting each node by a key that indicates all the
    left/right choices necessary to reach that node. Each key will
    reference a tuple that contains:
    - reference to the compound model for that node.
    - left most index contained within that subtree
      (relative to all indices for the whole tree)
    - right most index contained within that subtree
    '''
    # Anything without an ``isleaf`` attribute is treated as a leaf model
    # and simply collected in traversal order.
    if not hasattr(tree, 'isleaf'):
        leaflist.append(tree)
        return
    first_leaf = len(leaflist)
    make_subtree_dict(tree.left, nodepath + 'l', tdict, leaflist)
    make_subtree_dict(tree.right, nodepath + 'r', tdict, leaflist)
    last_leaf = len(leaflist) - 1
    tdict[nodepath] = (tree, first_leaf, last_leaf)
_ORDER_OF_OPERATORS = [('fix_inputs',), ('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
def fix_inputs(modelinstance, values):
    """
    This function creates a compound model with one or more of the input
    values of the input model assigned fixed values (scalar or array).

    Parameters
    ----------
    modelinstance : Model instance. This is the model that one or more of the
        model input values will be fixed to some constant value.
    values : A dictionary where the key identifies which input to fix
        and its value is the value to fix it at. The key may either be the
        name of the input or a number reflecting its order in the inputs.

    Examples
    --------
    >>> from astropy.modeling.models import Gaussian2D
    >>> g = Gaussian2D(1, 2, 3, 4, 5)
    >>> gv = fix_inputs(g, {0: 2.5})

    Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
    """
    # Delegates to CompoundModel's 'fix_inputs' operator, which removes the
    # fixed inputs from the resulting model's interface.
    return CompoundModel('fix_inputs', modelinstance, values)
def custom_model(*args, fit_deriv=None, **kwargs):
    """
    Create a model from a user defined function. The inputs and parameters of
    the model will be inferred from the arguments of the function.

    This can be used either as a function or as a decorator.  See below for
    examples of both usages.

    .. note::

        All model parameters have to be defined as keyword arguments with
        default values in the model function.  Use `None` as a default argument
        value if you do not want to have a default value for that parameter.

    Parameters
    ----------
    func : function
        Function which defines the model.  It should take N positional
        arguments where ``N`` is dimensions of the model (the number of
        independent variable in the model), and any number of keyword arguments
        (the parameters).  It must return the value of the model (typically as
        an array, but can also be a scalar for scalar inputs).  This
        corresponds to the `~astropy.modeling.Model.evaluate` method.
    fit_deriv : function, optional
        Function which defines the Jacobian derivative of the model. I.e., the
        derivative with respect to the *parameters* of the model.  It should
        have the same argument signature as ``func``, but should return a
        sequence where each element of the sequence is the derivative
        with respect to the corresponding argument. This corresponds to the
        :meth:`~astropy.modeling.FittableModel.fit_deriv` method.

    Examples
    --------
    Define a sinusoidal model function as a custom 1D model::

        >>> from astropy.modeling.models import custom_model
        >>> import numpy as np
        >>> def sine_model(x, amplitude=1., frequency=1.):
        ...     return amplitude * np.sin(2 * np.pi * frequency * x)
        >>> def sine_deriv(x, amplitude=1., frequency=1.):
        ...     return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
        >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)

    Create an instance of the custom model and evaluate it::

        >>> model = SineModel()
        >>> model(0.25)
        1.0

    This model instance can now be used like a usual astropy model.

    The next example demonstrates a 2D Moffat function model, and also
    demonstrates the support for docstrings (this example could also include
    a derivative, but it has been omitted for simplicity)::

        >>> @custom_model
        ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
        ...            alpha=1.0):
        ...     \"\"\"Two dimensional Moffat function.\"\"\"
        ...     rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
        ...     return amplitude * (1 + rr_gg) ** (-alpha)
        ...
        >>> print(Moffat2D.__doc__)
        Two dimensional Moffat function.
        >>> model = Moffat2D()
        >>> model(1, 1)  # doctest: +FLOAT_CMP
        0.3333333333333333
    """
    # Extra keyword arguments are currently tolerated but deprecated.
    if kwargs:
        warnings.warn(
            "Function received unexpected arguments ({}) these "
            "are ignored but will raise an Exception in the "
            "future.".format(list(kwargs)),
            AstropyDeprecationWarning)

    if len(args) == 1 and callable(args[0]):
        # Called directly as ``custom_model(func, ...)`` or used as a bare
        # decorator.
        return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
    elif not args:
        # Used as ``@custom_model(fit_deriv=...)``: defer wrapping until the
        # decorated function is supplied.
        return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
    else:
        raise TypeError(
            "{0} takes at most one positional argument (the callable/"
            "function to be turned into a model. When used as a decorator "
            "it should be passed keyword arguments only (if "
            "any).".format(__name__))
def _custom_model_wrapper(func, fit_deriv=None):
    """
    Internal implementation `custom_model`.

    When `custom_model` is called as a function its arguments are passed to
    this function, and the result of this function is returned.

    When `custom_model` is used as a decorator a partial evaluation of this
    function is returned by `custom_model`.

    Returns a dynamically-created `FittableModel` subclass named after
    ``func``.
    """
    if not callable(func):
        raise ModelDefinitionError(
            "func is not callable; it must be a function or other callable "
            "object")

    if fit_deriv is not None and not callable(fit_deriv):
        raise ModelDefinitionError(
            "fit_deriv not callable; it must be a function or other "
            "callable object")

    model_name = func.__name__

    inputs, params = get_inputs_and_params(func)

    # The derivative must expose one default per model parameter.
    # NOTE(review): ``fit_deriv.__defaults__`` is None for a function with
    # no defaults, which would raise TypeError here rather than the
    # intended ModelDefinitionError.
    if (fit_deriv is not None and
            len(fit_deriv.__defaults__) != len(params)):
        raise ModelDefinitionError("derivative function should accept "
                                   "same number of parameters as func.")

    # TODO: Maybe have a clever scheme for default output name?
    if inputs:
        output_names = (inputs[0].name,)
    else:
        output_names = ('x',)

    params = OrderedDict((param.name, Parameter(param.name,
                                                default=param.default))
                         for param in params)

    mod = find_current_module(2)
    if mod:
        modname = mod.__name__
    else:
        modname = '__main__'

    members = OrderedDict([
        ('__module__', str(modname)),
        ('__doc__', func.__doc__),
        ('n_inputs', len(inputs)),
        # tuple(x.name for x in inputs)),
        ('n_outputs', len(output_names)),
        ('evaluate', staticmethod(func))])

    if fit_deriv is not None:
        members['fit_deriv'] = staticmethod(fit_deriv)

    members.update(params)

    # Build the model class dynamically, with the parameters as class-level
    # Parameter descriptors.
    return type(model_name, (FittableModel,), members)
def render_model(model, arr=None, coords=None):
    """
    Evaluates a model on an input array. Evaluation is limited to
    a bounding box if the `Model.bounding_box` attribute is set.

    Parameters
    ----------
    model : `Model`
        Model to be evaluated.
    arr : `numpy.ndarray`, optional
        Array on which the model is evaluated.
    coords : array_like, optional
        Coordinate arrays mapping to ``arr``, such that
        ``arr[coords] == arr``.

    Returns
    -------
    array : `numpy.ndarray`
        The model evaluated on the input ``arr`` or a new array from
        ``coords``.
        If ``arr`` and ``coords`` are both `None`, the returned array is
        limited to the `Model.bounding_box` limits. If
        `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.

    Examples
    --------
    :ref:`bounding-boxes`
    """
    bbox = model.bounding_box

    if (coords is None) & (arr is None) & (bbox is None):
        raise ValueError('If no bounding_box is set,'
                         'coords or arr must be input.')

    # for consistent indexing
    if model.n_inputs == 1:
        if coords is not None:
            coords = [coords]
        if bbox is not None:
            bbox = [bbox]

    if arr is not None:
        arr = arr.copy()
        # Check dimensions match model
        if arr.ndim != model.n_inputs:
            raise ValueError('number of array dimensions inconsistent with '
                             'number of model inputs.')

    if coords is not None:
        # Check dimensions match arr and model
        coords = np.array(coords)
        if len(coords) != model.n_inputs:
            raise ValueError('coordinate length inconsistent with the number '
                             'of model inputs.')
        if arr is not None:
            if coords[0].shape != arr.shape:
                raise ValueError('coordinate shape inconsistent with the '
                                 'array shape.')
        else:
            arr = np.zeros(coords[0].shape)

    if bbox is not None:
        # assures position is at center pixel, important when using add_array
        pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
                                    for bb in bbox]).astype(int).T

        if coords is not None:
            sub_shape = tuple(delta * 2 + 1)
            sub_coords = np.array([extract_array(c, sub_shape, pos)
                                   for c in coords])
        else:
            limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
            sub_coords = np.mgrid[limits]

        # Coordinates come out in python (z, y, x) order; the model expects
        # input (x, y, z) order.
        sub_coords = sub_coords[::-1]

        if arr is None:
            arr = model(*sub_coords)
        else:
            try:
                arr = add_array(arr, model(*sub_coords), pos)
            except ValueError:
                raise ValueError('The `bounding_box` is larger than the input'
                                 ' arr in one or more dimensions. Set '
                                 '`model.bounding_box = None`.')
    else:
        if coords is None:
            im_shape = arr.shape
            limits = [slice(i) for i in im_shape]
            coords = np.mgrid[limits]

        arr += model(*coords[::-1])

    return arr
def _prepare_inputs_single_model(model, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
# Ensure that array scalars are always upgrade to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if model.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if model.n_outputs > model.n_inputs:
if len(set(broadcasts)) > 1:
raise ValueError(
"For models with n_outputs > n_inputs, the combination of "
"all inputs and parameters must broadcast to the same shape, "
"which will be used as the shape of all outputs. In this "
"case some of the inputs had different shapes, so it is "
"ambiguous how to format outputs for this model. Try using "
"inputs that are all the same size and shape.")
# Extend the broadcasts list to include shapes for all outputs
extra_outputs = model.n_outputs - model.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty
# just add a None since there is no broadcasting of outputs and
# inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
def _prepare_outputs_single_model(outputs, format_info):
broadcasts = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
broadcast_shape = broadcasts[idx]
if broadcast_shape is not None:
if not broadcast_shape:
# Shape is (), i.e. a scalar should be returned
outputs[idx] = output.item()
else:
outputs[idx] = output.reshape(broadcast_shape)
return tuple(outputs)
def _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis_input,
                              **kwargs):
    """
    Reshape each input of a model *set* so it broadcasts against the set's
    parameters, which carry an extra model axis at ``model.model_set_axis``.

    Returns the reshaped inputs plus ``(pivots,)`` — for each output, the
    axis position that currently holds the model-set dimension, consumed
    later by ``_prepare_outputs_model_set``.
    """
    reshaped = []
    pivots = []

    model_set_axis_param = model.model_set_axis  # needed to reshape param
    for idx, _input in enumerate(inputs):
        max_param_shape = ()
        if n_models > 1 and model_set_axis_input is not False:
            # Use the shape of the input *excluding* the model axis
            input_shape = (_input.shape[:model_set_axis_input] +
                           _input.shape[model_set_axis_input + 1:])
        else:
            input_shape = _input.shape

        for param in params:
            try:
                check_broadcast(input_shape,
                                remove_axes_from_shape(param.shape,
                                                       model_set_axis_param))
            except IncompatibleShapeError:
                raise ValueError(
                    "Model input argument {0!r} of shape {1!r} cannot be "
                    "broadcast with parameter {2!r} of shape "
                    "{3!r}.".format(model.inputs[idx], input_shape,
                                    param.name,
                                    remove_axes_from_shape(param.shape,
                                                           model_set_axis_param)))

            # Track the widest parameter shape (with the model axis removed)
            # seen so far for this input.
            if len(param.shape) - 1 > len(max_param_shape):
                max_param_shape = remove_axes_from_shape(param.shape,
                                                         model_set_axis_param)

        # We've now determined that, excluding the model_set_axis, the
        # input can broadcast with all the parameters
        input_ndim = len(input_shape)
        if model_set_axis_input is False:
            if len(max_param_shape) > input_ndim:
                # Just needs to prepend new axes to the input
                n_new_axes = 1 + len(max_param_shape) - input_ndim
                new_axes = (1,) * n_new_axes
                new_shape = new_axes + _input.shape
                pivot = model_set_axis_param
            else:
                pivot = input_ndim - len(max_param_shape)
                new_shape = (_input.shape[:pivot] + (1,) +
                             _input.shape[pivot:])
            new_input = _input.reshape(new_shape)
        else:
            if len(max_param_shape) >= input_ndim:
                n_new_axes = len(max_param_shape) - input_ndim
                pivot = model.model_set_axis
                new_axes = (1,) * n_new_axes
                new_shape = (_input.shape[:pivot + 1] + new_axes +
                             _input.shape[pivot + 1:])
                new_input = _input.reshape(new_shape)
            else:
                # Move the model-set axis to sit just before the axes that
                # broadcast with the parameters.
                pivot = _input.ndim - len(max_param_shape) - 1
                new_input = np.rollaxis(_input, model_set_axis_input,
                                        pivot + 1)
        pivots.append(pivot)
        reshaped.append(new_input)

    if model.n_inputs < model.n_outputs:
        # Extra outputs reuse the caller-supplied model-set axis.
        pivots.extend([model_set_axis_input] * (model.n_outputs - model.n_inputs))

    return reshaped, (pivots,)
def _prepare_outputs_model_set(model, outputs, format_info, model_set_axis):
pivots = format_info[0]
# If model_set_axis = False was passed then use
# model._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = model.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model_set_axis)
return tuple(outputs)
def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
                           validate_broadcasting):
    """
    Perform basic validation of model inputs--that they are mutually
    broadcastable and that they have the minimum dimensions for the given
    model_set_axis.

    If validation succeeds, returns the total shape that will result from
    broadcasting the input arrays with each other.
    """
    must_check_axis = n_models > 1 and model_set_axis is not False

    def _check_model_set_axis(idx, input_shape):
        # The input must carry the model-set axis, and its length along
        # that axis must equal the number of models in the set.
        if len(input_shape) < model_set_axis + 1:
            raise ValueError(
                "For model_set_axis={0}, all inputs must be at "
                "least {1}-dimensional.".format(
                    model_set_axis, model_set_axis + 1))
        if input_shape[model_set_axis] != n_models:
            try:
                argname = argnames[idx]
            except IndexError:
                # the case of model.inputs = ()
                argname = str(idx)
            raise ValueError(
                "Input argument {0!r} does not have the correct "
                "dimensions in model_set_axis={1} for a model set with "
                "n_models={2}.".format(argname, model_set_axis,
                                       n_models))

    all_shapes = []
    for idx, _input in enumerate(inputs):
        input_shape = np.shape(_input)
        # Note: scalar inputs (shape ()) get a pass on the axis check.
        if input_shape and must_check_axis:
            _check_model_set_axis(idx, input_shape)
        all_shapes.append(input_shape)

    total_shape = check_consistent_shapes(*all_shapes)
    if total_shape is None:
        raise ValueError(
            "All inputs must have identical shapes or must be scalars.")

    return total_shape
def remove_axes_from_shape(shape, axis):
    """
    Given a shape tuple as the first input, construct a new one by removing
    that particular axis from the shape and all preceding axes. Negative axis
    numbers are permitted, where the axis is relative to the last axis.

    Parameters
    ----------
    shape : tuple
        The shape to trim.
    axis : int
        A negative axis removes only the named axis (counted from the
        end); a non-negative axis removes that axis *and every axis
        before it*, clamping out-of-range values to the last axis.

    Returns
    -------
    tuple
        The reduced shape.
    """
    if len(shape) == 0:
        return shape
    if axis < 0:
        # Negative axis: drop just that one axis (relative to the end) and
        # return immediately -- the prefix is kept.
        axis = len(shape) + axis
        return shape[:axis] + shape[axis + 1:]
    # Non-negative axis: clamp out-of-range values to the last axis, then
    # drop the axis together with everything before it.
    if axis >= len(shape):
        axis = len(shape) - 1
    return shape[axis + 1:]
def check_consistent_shapes(*shapes):
    """
    Given shapes as arguments, check to see if all are the same (excluding
    scalars, i.e., shape == ()); if all the same, return the common shape;
    if not, return None.
    """
    # Scalars broadcast with anything, so drop empty shapes first.
    nonscalar = [shape for shape in shapes if shape != ()]
    if not nonscalar:
        return ()
    reference = nonscalar[0]
    if all(shape == reference for shape in nonscalar[1:]):
        return reference
    return None
def get_bounding_box(self):
    """
    Return the ``bounding_box`` of a model, or None if it has none.

    Models without a bounding box raise NotImplementedError from the
    ``bounding_box`` property; that is normalized to None here.
    """
    try:
        return self.bounding_box
    except NotImplementedError:
        return None
def generic_call(self, *inputs, **kwargs):
    """The base ``Model.__call__`` method.

    Prepares the inputs, evaluates the model (restricted to the bounding
    box when ``with_bounding_box=True`` is passed), and formats the
    outputs, filling out-of-box positions with ``fill_value``.
    """
    inputs, format_info = self.prepare_inputs(*inputs, **kwargs)

    if isinstance(self, CompoundModel):
        # CompoundModels do not normally hold parameters at that level
        parameters = ()
    else:
        parameters = self._param_sets(raw=True, units=True)

    with_bbox = kwargs.pop('with_bounding_box', False)
    fill_value = kwargs.pop('fill_value', np.nan)
    bbox = get_bounding_box(self)

    if with_bbox and bbox is not None:
        input_shape = _validate_input_shapes(
            inputs, self.inputs, self._n_models, self.model_set_axis,
            self.standard_broadcasting)
        # Keep only the positions that fall inside the bounding box.
        vinputs, valid_ind, allout = prepare_bounding_box_inputs(
            self, input_shape, inputs, bbox)
        valid_result_unit = None
        if not allout:
            valid_result = self.evaluate(*chain(vinputs, parameters))
            valid_result_unit = getattr(valid_result, 'unit', None)
            if self.n_outputs == 1:
                valid_result = [valid_result]
            # Expand back to the full input shape, padding with fill_value.
            outputs = prepare_bounding_box_outputs(valid_result, valid_ind,
                                                   input_shape, fill_value)
        else:
            # Every position is outside the box: all outputs are fill_value.
            outputs = [np.zeros(input_shape) + fill_value for i in range(self.n_outputs)]
        if valid_result_unit is not None:
            outputs = Quantity(outputs, valid_result_unit, copy=False)
    else:
        outputs = self.evaluate(*chain(inputs, parameters))
        if self.n_outputs == 1:
            outputs = (outputs,)

    outputs = self.prepare_outputs(format_info, *outputs, **kwargs)
    outputs = self._process_output_units(inputs, outputs)

    if self.n_outputs == 1:
        return outputs[0]
    return outputs
def prepare_bounding_box_inputs(self, input_shape, inputs, bbox):
    """
    Select only the input positions that fall inside the bounding box.

    Returns the trimmed inputs, the indices of the in-box positions, and a
    flag that is True when every position lies outside the box.
    """
    everything_outside = False
    if self.n_inputs > 1:
        # bounding_box is in python order -
        # convert it to the order of the inputs
        bbox = bbox[::-1]
    if self.n_inputs == 1:
        bbox = [bbox]

    # Boolean mask marking positions that fall outside the bounding box.
    outside_mask = np.zeros(input_shape, dtype=bool)
    for axis, raw in enumerate(inputs):
        values = np.asanyarray(raw)
        beyond = np.logical_or(values < bbox[axis][0], values > bbox[axis][1])
        if values.shape:
            outside_mask[beyond] = True
        else:
            # Scalar input: the mask itself is scalar, so it can be tested
            # directly.
            outside_mask |= beyond
            if outside_mask:
                everything_outside = True

    # Indices of the positions that remain inside the bounding box.
    valid_ind = np.atleast_1d(np.logical_not(outside_mask)).nonzero()
    if len(valid_ind[0]) == 0:
        everything_outside = True

    # The returned inputs hold only the in-box positions.
    trimmed = []
    if not everything_outside:
        for raw in inputs:
            if input_shape:
                trimmed.append(np.array(raw)[valid_ind])
            else:
                trimmed.append(raw)
    return trimmed, valid_ind, everything_outside
def prepare_bounding_box_outputs(valid_result, valid_ind, input_shape, fill_value):
    """
    Expand each in-box result back to the full input shape.

    Positions outside the bounding box are populated with ``fill_value``;
    in-box positions (given by ``valid_ind``) receive the computed values.
    """
    padded = [np.zeros(input_shape) + fill_value for _ in valid_result]
    for pos, values in enumerate(valid_result):
        if padded[pos].shape:
            padded[pos][valid_ind] = values
        else:
            # Scalar case: replace the 0-d placeholder outright.
            padded[pos] = np.array(values)
    return padded
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def hide_inverse(model):
    """
    This is a convenience function intended to disable automatic generation
    of the inverse in compound models by disabling one of the constituent
    model's inverse. This is to handle cases where user provided inverse
    functions are not compatible within an expression.

    Example:
        compound_model.inverse = hide_inverse(m1) + m2 + m3

    This will ensure that the defined inverse itself won't attempt to
    build its own inverse, which would otherwise fail in this example
    (e.g., m = m1 + m2 + m3 happens to raise an exception for this
    reason.)

    Note that this permanently disables it. To prevent that either copy
    the model or restore the inverse later.
    """
    # Deleting the attribute discards any user-supplied inverse; the model
    # is returned so the call can be embedded inline in an expression.
    del model.inverse
    return model
| [
"numpy.empty",
"astropy.utils.indent",
"collections.defaultdict",
"numpy.shape",
"inspect.isabstract",
"numpy.mean",
"astropy.utils.codegen.make_function_with_signature",
"collections.deque",
"astropy.utils.isiterable",
"numpy.logical_not",
"numpy.ndim",
"inspect.signature",
"numpy.rollaxis"... | [((25352, 25371), 'astropy.utils.metadata.MetaData', 'metadata.MetaData', ([], {}), '()\n', (25369, 25371), False, 'from astropy.utils import indent, metadata\n'), ((142922, 142944), 'astropy.utils.find_current_module', 'find_current_module', (['(2)'], {}), '(2)\n', (142941, 142944), False, 'from astropy.utils import sharedmethod, find_current_module, check_broadcast, IncompatibleShapeError, isiterable\n'), ((158895, 158928), 'numpy.zeros', 'np.zeros', (['input_shape'], {'dtype': 'bool'}), '(input_shape, dtype=bool)\n', (158903, 158928), True, 'import numpy as np\n'), ((2697, 2710), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2708, 2710), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((3568, 3591), 'astropy.utils.misc.get_parameters', 'get_parameters', (['members'], {}), '(members)\n', (3582, 3591), False, 'from astropy.utils.misc import get_parameters\n'), ((5300, 5313), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5311, 5313), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((8137, 8159), 'astropy.utils.find_current_module', 'find_current_module', (['(2)'], {}), '(2)\n', (8156, 8159), False, 'from astropy.utils import sharedmethod, find_current_module, check_broadcast, IncompatibleShapeError, isiterable\n'), ((69433, 69452), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (69446, 69452), False, 'import copy\n'), ((69561, 69580), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (69574, 69580), False, 'import copy\n'), ((78772, 78789), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (78783, 78789), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((85725, 85763), 'numpy.empty', 'np.empty', (['total_size'], {'dtype': 'np.float64'}), '(total_size, dtype=np.float64)\n', (85733, 85763), True, 'import numpy as np\n'), ((91728, 91744), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (91736, 
91744), True, 'import numpy as np\n'), ((115383, 115390), 'collections.deque', 'deque', ([], {}), '()\n', (115388, 115390), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((119260, 119273), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (119271, 119273), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((120715, 120753), 'numpy.empty', 'np.empty', (['total_size'], {'dtype': 'np.float64'}), '(total_size, dtype=np.float64)\n', (120723, 120753), True, 'import numpy as np\n'), ((145074, 145090), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (145082, 145090), True, 'import numpy as np\n'), ((154002, 154018), 'numpy.shape', 'np.shape', (['_input'], {}), '(_input)\n', (154010, 154018), True, 'import numpy as np\n'), ((158982, 159000), 'numpy.asanyarray', 'np.asanyarray', (['inp'], {}), '(inp)\n', (158995, 159000), True, 'import numpy as np\n'), ((159019, 159072), 'numpy.logical_or', 'np.logical_or', (['(inp < bbox[ind][0])', '(inp > bbox[ind][1])'], {}), '(inp < bbox[ind][0], inp > bbox[ind][1])\n', (159032, 159072), True, 'import numpy as np\n'), ((11079, 11102), 'inspect.signature', 'signature', (['bounding_box'], {}), '(bounding_box)\n', (11088, 11102), False, 'from inspect import signature\n'), ((15649, 15747), 'astropy.utils.codegen.make_function_with_signature', 'make_function_with_signature', (['__call__', 'args', 'kwargs'], {'varargs': '"""inputs"""', 'varkwargs': '"""new_inputs"""'}), "(__call__, args, kwargs, varargs='inputs',\n varkwargs='new_inputs')\n", (15677, 15747), False, 'from astropy.utils.codegen import make_function_with_signature\n'), ((17152, 17224), 'astropy.utils.codegen.make_function_with_signature', 'make_function_with_signature', (['__init__', 'args', 'kwargs'], {'varkwargs': '"""kwargs"""'}), "(__init__, args, kwargs, varkwargs='kwargs')\n", (17180, 17224), False, 'from astropy.utils.codegen import make_function_with_signature\n'), ((28054, 28433), 'warnings.warn', 
'warnings.warn', (['f"""Class {self.__class__.__name__} defines class attributes ``inputs``.\n This has been deprecated in v4.0 and support will be removed in v4.1.\n Starting with v4.0 classes must define a class attribute ``n_inputs``.\n Please consult the documentation for details.\n """', 'AstropyDeprecationWarning'], {}), '(\n f"""Class {self.__class__.__name__} defines class attributes ``inputs``.\n This has been deprecated in v4.0 and support will be removed in v4.1.\n Starting with v4.0 classes must define a class attribute ``n_inputs``.\n Please consult the documentation for details.\n """\n , AstropyDeprecationWarning)\n', (28067, 28433), False, 'import warnings\n'), ((57480, 57514), 'numpy.asanyarray', 'np.asanyarray', (['coords'], {'dtype': 'float'}), '(coords, dtype=float)\n', (57493, 57514), True, 'import numpy as np\n'), ((57873, 57904), 'numpy.asanyarray', 'np.asanyarray', (['out'], {'dtype': 'float'}), '(out, dtype=float)\n', (57886, 57904), True, 'import numpy as np\n'), ((62436, 62470), 'numpy.asanyarray', 'np.asanyarray', (['_input'], {'dtype': 'float'}), '(_input, dtype=float)\n', (62449, 62470), True, 'import numpy as np\n'), ((85117, 85132), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (85125, 85132), True, 'import numpy as np\n'), ((85372, 85386), 'numpy.size', 'np.size', (['value'], {}), '(value)\n', (85379, 85386), True, 'import numpy as np\n'), ((85413, 85428), 'numpy.shape', 'np.shape', (['value'], {}), '(value)\n', (85421, 85428), True, 'import numpy as np\n'), ((87337, 87352), 'numpy.shape', 'np.shape', (['value'], {}), '(value)\n', (87345, 87352), True, 'import numpy as np\n'), ((88708, 88736), 'astropy.utils.check_broadcast', 'check_broadcast', (['*all_shapes'], {}), '(*all_shapes)\n', (88723, 88736), False, 'from astropy.utils import sharedmethod, find_current_module, check_broadcast, IncompatibleShapeError, isiterable\n'), ((93921, 93959), 'astropy.table.Table', 'Table', (['columns'], {'names': 'self.param_names'}), 
'(columns, names=self.param_names)\n', (93926, 93959), False, 'from astropy.table import Table\n'), ((120325, 120339), 'numpy.size', 'np.size', (['value'], {}), '(value)\n', (120332, 120339), True, 'import numpy as np\n'), ((120366, 120381), 'numpy.shape', 'np.shape', (['value'], {}), '(value)\n', (120374, 120381), True, 'import numpy as np\n'), ((130677, 130711), 'numpy.asanyarray', 'np.asanyarray', (['coords'], {'dtype': 'float'}), '(coords, dtype=float)\n', (130690, 130711), True, 'import numpy as np\n'), ((131070, 131101), 'numpy.asanyarray', 'np.asanyarray', (['out'], {'dtype': 'float'}), '(out, dtype=float)\n', (131083, 131101), True, 'import numpy as np\n'), ((141200, 141261), 'functools.partial', 'functools.partial', (['_custom_model_wrapper'], {'fit_deriv': 'fit_deriv'}), '(_custom_model_wrapper, fit_deriv=fit_deriv)\n', (141217, 141261), False, 'import functools\n'), ((145490, 145515), 'numpy.zeros', 'np.zeros', (['coords[0].shape'], {}), '(coords[0].shape)\n', (145498, 145515), True, 'import numpy as np\n'), ((153307, 153349), 'numpy.rollaxis', 'np.rollaxis', (['output', 'pivot', 'model_set_axis'], {}), '(output, pivot, model_set_axis)\n', (153318, 153349), True, 'import numpy as np\n'), ((158045, 158093), 'astropy.units.Quantity', 'Quantity', (['outputs', 'valid_result_unit'], {'copy': '(False)'}), '(outputs, valid_result_unit, copy=False)\n', (158053, 158093), False, 'from astropy.units import Quantity, UnitsError, dimensionless_unscaled\n'), ((159839, 159860), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (159847, 159860), True, 'import numpy as np\n'), ((160015, 160026), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (160023, 160026), True, 'import numpy as np\n'), ((7278, 7301), 'inspect.isabstract', 'inspect.isabstract', (['cls'], {}), '(cls)\n', (7296, 7301), False, 'import inspect\n'), ((15983, 16006), 'inspect.isabstract', 'inspect.isabstract', (['cls'], {}), '(cls)\n', (16001, 16006), False, 'import inspect\n'), 
((34028, 34043), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (34036, 34043), True, 'import numpy as np\n'), ((57800, 57825), 'numpy.zeros', 'np.zeros', (['coords[0].shape'], {}), '(coords[0].shape)\n', (57808, 57825), True, 'import numpy as np\n'), ((79323, 79360), 'astropy.units.utils.quantity_asanyarray', 'quantity_asanyarray', (['arg'], {'dtype': 'float'}), '(arg, dtype=float)\n', (79342, 79360), False, 'from astropy.units.utils import quantity_asanyarray\n'), ((80261, 80300), 'astropy.units.utils.quantity_asanyarray', 'quantity_asanyarray', (['value'], {'dtype': 'float'}), '(value, dtype=float)\n', (80280, 80300), False, 'from astropy.units.utils import quantity_asanyarray\n'), ((81850, 81864), 'numpy.ndim', 'np.ndim', (['value'], {}), '(value)\n', (81857, 81864), True, 'import numpy as np\n'), ((84939, 84959), 'numpy.array', 'np.array', (['_val.value'], {}), '(_val.value)\n', (84947, 84959), True, 'import numpy as np\n'), ((85061, 85075), 'numpy.array', 'np.array', (['_val'], {}), '(_val)\n', (85069, 85075), True, 'import numpy as np\n'), ((86117, 86134), 'numpy.array', 'np.array', (['[value]'], {}), '([value])\n', (86125, 86134), True, 'import numpy as np\n'), ((90422, 90437), 'numpy.shape', 'np.shape', (['value'], {}), '(value)\n', (90430, 90437), True, 'import numpy as np\n'), ((90671, 90688), 'numpy.array', 'np.array', (['[value]'], {}), '([value])\n', (90679, 90688), True, 'import numpy as np\n'), ((130997, 131022), 'numpy.zeros', 'np.zeros', (['coords[0].shape'], {}), '(coords[0].shape)\n', (131005, 131022), True, 'import numpy as np\n'), ((152494, 152546), 'numpy.rollaxis', 'np.rollaxis', (['_input', 'model_set_axis_input', '(pivot + 1)'], {}), '(_input, model_set_axis_input, pivot + 1)\n', (152505, 152546), True, 'import numpy as np\n'), ((158137, 158162), 'itertools.chain', 'chain', (['inputs', 'parameters'], {}), '(inputs, parameters)\n', (158142, 158162), False, 'from itertools import chain\n'), ((159308, 159331), 'numpy.logical_not', 
'np.logical_not', (['nan_ind'], {}), '(nan_ind)\n', (159322, 159331), True, 'import numpy as np\n'), ((27258, 27276), 'copy.deepcopy', 'copy.deepcopy', (['val'], {}), '(val)\n', (27271, 27276), False, 'import copy\n'), ((34179, 34193), 'numpy.size', 'np.size', (['value'], {}), '(value)\n', (34186, 34193), True, 'import numpy as np\n'), ((34866, 35001), 'astropy.units.UnitsError', 'UnitsError', (['f"""The \'{param.name}\' parameter should be given as a Quantity because it was originally initialized as a Quantity"""'], {}), '(\n f"The \'{param.name}\' parameter should be given as a Quantity because it was originally initialized as a Quantity"\n )\n', (34876, 35001), False, 'from astropy.units import Quantity, UnitsError, dimensionless_unscaled\n'), ((40417, 40432), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (40425, 40432), True, 'import numpy as np\n'), ((68537, 68566), 'astropy.utils.isiterable', 'isiterable', (['self.return_units'], {}), '(self.return_units)\n', (68547, 68566), False, 'from astropy.utils import sharedmethod, find_current_module, check_broadcast, IncompatibleShapeError, isiterable\n'), ((90944, 90965), 'astropy.units.Quantity', 'Quantity', (['value', 'unit'], {}), '(value, unit)\n', (90952, 90965), False, 'from astropy.units import Quantity, UnitsError, dimensionless_unscaled\n'), ((116780, 116798), 'astropy.utils.indent', 'indent', (['components'], {}), '(components)\n', (116786, 116798), False, 'from astropy.utils import indent, metadata\n'), ((145879, 145911), 'astropy.nddata.utils.extract_array', 'extract_array', (['c', 'sub_shape', 'pos'], {}), '(c, sub_shape, pos)\n', (145892, 145911), False, 'from astropy.nddata.utils import add_array, extract_array\n'), ((147409, 147450), 'astropy.utils.check_broadcast', 'check_broadcast', (['input_shape', 'param.shape'], {}), '(input_shape, param.shape)\n', (147424, 147450), False, 'from astropy.utils import sharedmethod, find_current_module, check_broadcast, IncompatibleShapeError, 
isiterable\n'), ((157547, 157573), 'itertools.chain', 'chain', (['vinputs', 'parameters'], {}), '(vinputs, parameters)\n', (157552, 157573), False, 'from itertools import chain\n'), ((157914, 157935), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (157922, 157935), True, 'import numpy as np\n'), ((16814, 16849), 'astropy.units.Quantity', 'Quantity', (['default', 'unit'], {'copy': '(False)'}), '(default, unit, copy=False)\n', (16822, 16849), False, 'from astropy.units import Quantity, UnitsError, dimensionless_unscaled\n'), ((18361, 18385), 'inspect.isabstract', 'inspect.isabstract', (['base'], {}), '(base)\n', (18379, 18385), False, 'import inspect\n'), ((34502, 34516), 'numpy.size', 'np.size', (['value'], {}), '(value)\n', (34509, 34516), True, 'import numpy as np\n'), ((58479, 58511), 'astropy.nddata.utils.extract_array', 'extract_array', (['c', 'sub_shape', 'pos'], {}), '(c, sub_shape, pos)\n', (58492, 58511), False, 'from astropy.nddata.utils import add_array, extract_array\n'), ((67812, 67834), 'numpy.any', 'np.any', (['(inputs[i] != 0)'], {}), '(inputs[i] != 0)\n', (67818, 67834), True, 'import numpy as np\n'), ((106986, 107007), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (106994, 107007), True, 'import numpy as np\n'), ((131675, 131707), 'astropy.nddata.utils.extract_array', 'extract_array', (['c', 'sub_shape', 'pos'], {}), '(c, sub_shape, pos)\n', (131688, 131707), False, 'from astropy.nddata.utils import add_array, extract_array\n'), ((159559, 159572), 'numpy.array', 'np.array', (['inp'], {}), '(inp)\n', (159567, 159572), True, 'import numpy as np\n'), ((145657, 145668), 'numpy.mean', 'np.mean', (['bb'], {}), '(bb)\n', (145664, 145668), True, 'import numpy as np\n'), ((145670, 145698), 'numpy.ceil', 'np.ceil', (['((bb[1] - bb[0]) / 2)'], {}), '((bb[1] - bb[0]) / 2)\n', (145677, 145698), True, 'import numpy as np\n'), ((58226, 58237), 'numpy.mean', 'np.mean', (['bb'], {}), '(bb)\n', (58233, 58237), True, 
'import numpy as np\n'), ((58239, 58267), 'numpy.ceil', 'np.ceil', (['((bb[1] - bb[0]) / 2)'], {}), '((bb[1] - bb[0]) / 2)\n', (58246, 58267), True, 'import numpy as np\n'), ((131422, 131433), 'numpy.mean', 'np.mean', (['bb'], {}), '(bb)\n', (131429, 131433), True, 'import numpy as np\n'), ((131435, 131463), 'numpy.ceil', 'np.ceil', (['((bb[1] - bb[0]) / 2)'], {}), '((bb[1] - bb[0]) / 2)\n', (131442, 131463), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from openff.toolkit.tests.utils import compare_system_energies
from openff.toolkit.topology import Molecule, Topology
from simtk import unit
from openff.system.stubs import ForceField
from openff.system.utils import get_test_file_path
@pytest.mark.parametrize("n_mols", [1, 2])
@pytest.mark.parametrize(
    "mol",
    [
        "C",
        "CC",  # introduces proper torsion term(s)
        "OC=O",  # smallest molecule exercising a multi-term torsion
        "CCOC",  # exercises t86, whose idivf is not 1.0
        "C1COC(=O)O1",  # exercises the improper torsion i2
    ],
)
def test_from_openmm_single_mols(mol, n_mols):
    """Check that ForceField.create_openmm_system and System.to_openmm
    produce systems with similar energies.

    TODO: Tighten tolerances
    TODO: Test periodic and non-periodic
    """
    force_field = ForceField(get_test_file_path("parsley.offxml"))

    molecule = Molecule.from_smiles(mol)
    molecule.generate_conformers(n_conformers=1)

    topology = Topology.from_molecules(n_mols * [molecule])

    # Shift the conformer toward the origin and place it in a 1 nm cubic box.
    molecule.conformers[0] -= np.min(molecule.conformers) * unit.angstrom
    topology.box_vectors = np.eye(3) * np.asarray([10, 10, 10]) * unit.nanometer

    if n_mols == 1:
        positions = molecule.conformers[0]
    elif n_mols == 2:
        # Offset the second copy so the two molecules do not overlap.
        positions = np.vstack(
            [molecule.conformers[0], molecule.conformers[0] + 3 * unit.nanometer]
        )
    positions = positions * unit.angstrom

    toolkit_system = force_field.create_openmm_system(topology)
    native_system = force_field.create_openff_system(topology=topology).to_openmm()

    compare_system_energies(
        system1=toolkit_system,
        system2=native_system,
        positions=positions,
        box_vectors=topology.box_vectors,
    )
| [
"openff.toolkit.topology.Topology.from_molecules",
"openff.toolkit.topology.Molecule.from_smiles",
"numpy.eye",
"numpy.asarray",
"numpy.min",
"openff.system.utils.get_test_file_path",
"openff.toolkit.tests.utils.compare_system_energies",
"pytest.mark.parametrize",
"numpy.vstack"
] | [((272, 313), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_mols"""', '[1, 2]'], {}), "('n_mols', [1, 2])\n", (295, 313), False, 'import pytest\n'), ((315, 389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mol"""', "['C', 'CC', 'OC=O', 'CCOC', 'C1COC(=O)O1']"], {}), "('mol', ['C', 'CC', 'OC=O', 'CCOC', 'C1COC(=O)O1'])\n", (338, 389), False, 'import pytest\n'), ((919, 944), 'openff.toolkit.topology.Molecule.from_smiles', 'Molecule.from_smiles', (['mol'], {}), '(mol)\n', (939, 944), False, 'from openff.toolkit.topology import Molecule, Topology\n'), ((999, 1038), 'openff.toolkit.topology.Topology.from_molecules', 'Topology.from_molecules', (['(n_mols * [mol])'], {}), '(n_mols * [mol])\n', (1022, 1038), False, 'from openff.toolkit.topology import Molecule, Topology\n'), ((1557, 1681), 'openff.toolkit.tests.utils.compare_system_energies', 'compare_system_energies', ([], {'system1': 'toolkit_system', 'system2': 'native_system', 'positions': 'positions', 'box_vectors': 'top.box_vectors'}), '(system1=toolkit_system, system2=native_system,\n positions=positions, box_vectors=top.box_vectors)\n', (1580, 1681), False, 'from openff.toolkit.tests.utils import compare_system_energies\n'), ((870, 906), 'openff.system.utils.get_test_file_path', 'get_test_file_path', (['"""parsley.offxml"""'], {}), "('parsley.offxml')\n", (888, 906), False, 'from openff.system.utils import get_test_file_path\n'), ((1064, 1086), 'numpy.min', 'np.min', (['mol.conformers'], {}), '(mol.conformers)\n', (1070, 1086), True, 'import numpy as np\n'), ((1126, 1135), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1132, 1135), True, 'import numpy as np\n'), ((1138, 1162), 'numpy.asarray', 'np.asarray', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (1148, 1162), True, 'import numpy as np\n'), ((1281, 1351), 'numpy.vstack', 'np.vstack', (['[mol.conformers[0], mol.conformers[0] + 3 * unit.nanometer]'], {}), '([mol.conformers[0], mol.conformers[0] + 3 * unit.nanometer])\n', 
(1290, 1351), True, 'import numpy as np\n')] |
import numpy as np
# Fix the random seed so repeated runs are reproducible; change it to
# explore different random initializations and solutions.
np.random.seed(42)
def stepFunction(t):
    """Heaviside step: return 1 when t >= 0, otherwise 0."""
    return 1 if t >= 0 else 0
def prediction(X, W, b):
    """Classify one sample: 1 if W.X + b >= 0, else 0."""
    score = np.matmul(X, W) + b
    return stepFunction(score[0])
# The perceptron trick: for every misclassified point, nudge the decision
# boundary toward that point by a step proportional to learn_rate.
def perceptronStep(X, y, W, b, learn_rate=0.01):
    """Run one pass of the perceptron update rule over the dataset.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input points.
    y : array-like of 0/1 labels, length n_samples
        Ground-truth classes.
    W : numpy array, shape (n_features, 1)
        Weight vector, updated in place.
    b : float
        Bias term.
    learn_rate : float, optional
        Step size of each correction.

    Returns
    -------
    tuple
        The updated ``(W, b)``.
    """
    n_features = len(W)
    for i in range(len(X)):
        y_hat = prediction(X[i], W, b)
        error = y[i] - y_hat  # +1: missed positive, -1: missed negative, 0: correct
        if error == 1 or error == -1:
            # One signed update covers both misclassification cases and any
            # number of features (the original hard-coded W[0] and W[1]).
            for j in range(n_features):
                W[j] += error * X[i][j] * learn_rate
            b += error * learn_rate
    return W, b
# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns the boundary lines obtained over the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate=0.01, num_epochs=25):
    """Train a perceptron and record the decision boundary after each epoch.

    Parameters
    ----------
    X : numpy array, shape (n_samples, 2)
        2-D input points.
    y : array-like of 0/1 labels
        Ground-truth classes.
    learn_rate : float, optional
        Step size passed to ``perceptronStep``.
    num_epochs : int, optional
        Number of passes over the dataset.

    Returns
    -------
    list of tuple
        One ``(slope, intercept)`` pair per epoch, describing the boundary
        ``W[0]*x + W[1]*y + b = 0`` solved for y.
    """
    # Only the maximum x-coordinate is needed (to offset the initial bias);
    # the original also computed x_min/y_min/y_max but never used them.
    x_max = max(X.T[0])

    # Random initial weights; the bias starts near x_max so the initial
    # boundary tends to sit outside the data cloud.
    W = np.array(np.random.rand(2, 1))
    b = np.random.rand(1)[0] + x_max

    # These are the solution lines that get plotted below.
    boundary_lines = []
    for _ in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        boundary_lines.append((-W[0] / W[1], -b / W[1]))
    return boundary_lines
| [
"numpy.random.rand",
"numpy.random.seed",
"numpy.matmul"
] | [((99, 117), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (113, 117), True, 'import numpy as np\n'), ((1598, 1618), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (1612, 1618), True, 'import numpy as np\n'), ((1628, 1645), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1642, 1645), True, 'import numpy as np\n'), ((238, 253), 'numpy.matmul', 'np.matmul', (['X', 'W'], {}), '(X, W)\n', (247, 253), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Evaluation metrics for classification and regression tasks '''
from .utils import random_name
from swat.cas.table import CASColumn
from swat.cas.table import CASTable
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
def confusion_matrix(y_true, y_pred, castable=None, labels=None, id_vars=None):
    '''
    Computes the confusion matrix of a classification task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_pred has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_pred has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    y_pred : string or :class:`CASColumn`
        The column of the predicted class labels. If it is a string, then
        y_true has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_true has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if the y_pred and y_true are strings.
        Default = None
    labels : list, optional
        List of positions that can be used to reorder the matrix or
        select a subset of it. NOTE: the values are interpreted
        positionally (via ``iloc``) against the sorted class list, not as
        class label names. If ``labels=None``, all labels are included.
        Default=None
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_pred if they are
        from different CASTables. The column names need to appear in both CASTables,
        and they serve to match y_true and y_pred appropriately, since observation
        orders can be shuffled in distributed computing environment.
        Default = None

    Returns
    -------
    :class:`pandas.DataFrame`
        The column index is the predicted class labels.
        The row index is the ground truth class labels.

    '''
    check_results = _check_inputs(y_true, y_pred, castable=castable,
                                  return_target_dtype=True, id_vars=id_vars)
    y_true = check_results[0]
    y_pred = check_results[1]
    castable = check_results[2]
    conn = check_results[3]
    tmp_table_created = check_results[4]
    target_dtype = check_results[5]

    # Cross-tabulate ground truth (rows) against predictions (columns)
    # on the CAS server.
    res = castable.retrieve('crosstab',
                            _messagelevel='error',
                            row=y_true, col=y_pred)

    conf_mat = res['Crosstab']
    # The crosstab only contains classes that actually occur in each
    # dimension, so take the union of row and column classes to build a
    # square, symmetric-shaped matrix.
    row_class = [x.strip() for x in conf_mat.iloc[:, 0].values]
    col_class = [conf_mat.colinfo[x].label for x in conf_mat.columns.values][1:]
    tot_class = set(row_class).union(set(col_class))

    # Fill a square array indexed by the sorted class labels; classes
    # missing from the crosstab keep zero counts.
    cls_list = list(tot_class)
    cls_list.sort()
    ret = np.zeros((len(tot_class), len(tot_class)))
    for i, row in conf_mat.iterrows():
        irow = cls_list.index(row.iloc[0].strip())
        for j, col in enumerate(conf_mat.iloc[i, 1:]):
            icol = cls_list.index(col_class[j])
            ret[int(irow), int(icol)] = col

    conf_mat = pd.DataFrame(data=ret, columns=cls_list, index=cls_list)
    # Name the row index after the ground-truth column and restore the
    # index to the target column's original dtype.
    conf_mat.index.names = [y_true]
    if target_dtype == 'double':
        target_index_dtype = np.float64
    elif target_dtype.startswith('int'):
        target_index_dtype = getattr(np, target_dtype)
    else:
        target_index_dtype = 'str'
    conf_mat.index = conf_mat.index.astype(target_index_dtype)

    if tmp_table_created:  # drop the temporary table created by _check_inputs
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    if labels is None:
        return conf_mat
    else:
        if not isinstance(labels, list):
            labels = [labels]
        return conf_mat.iloc[labels, labels]
def accuracy_score(y_true, y_pred, castable=None, normalize=True, id_vars=None):
    '''
    Computes the classification accuracy score.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground truth labels. When a string, ``y_pred`` must also be a string
        and both columns must belong to the CASTable passed via ``castable``.
        When a :class:`CASColumn`, ``y_pred`` must also be a
        :class:`CASColumn` (possibly from a different CASTable) and
        ``castable`` is ignored.
    y_pred : string or :class:`CASColumn`
        Predicted class labels, with the same string/CASColumn pairing rules
        as ``y_true``.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_pred`` are strings.
        Default = None
    normalize : boolean, optional
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.
        Default = True
    id_vars : string or list of strings, optional
        Column names present in both CASTables that uniquely identify
        observations, used to match ``y_true`` and ``y_pred`` when they come
        from different tables, since observation order can be shuffled in a
        distributed computing environment.
        Default = None

    Returns
    -------
    score : float
        The count (``normalize=False``) or fraction (``normalize=True``) of
        correctly classified samples.

    '''
    checked = _check_inputs(y_true, y_pred, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_pred = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    # Derive accuracy from the confusion matrix: correct predictions live
    # on its diagonal.
    conf_mat = confusion_matrix(y_true, y_pred, castable=castable, id_vars=id_vars)
    total_count = sum(conf_mat.sum())
    correct_count = sum(pd.Series(np.diag(conf_mat),
                                  index=[conf_mat.index, conf_mat.columns]))

    score = correct_count / total_count if normalize else correct_count

    if tmp_table_created:  # drop the temporary table created by _check_inputs
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    return score
def plot_roc(y_true, y_score, pos_label, castable=None, cutstep=0.001,
             figsize=(8, 8), fontsize_spec=None, linewidth=1, id_vars=None):
    '''
    Plot the receiver operating characteristic (ROC) curve of a binary
    classifier.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground truth labels. When a string, ``y_score`` must also be a string
        and both columns must belong to the CASTable passed via ``castable``.
        When a :class:`CASColumn`, ``y_score`` must also be a
        :class:`CASColumn` (possibly from a different CASTable) and
        ``castable`` is ignored.
    y_score : string or :class:`CASColumn`
        Estimated probability of the positive class, with the same
        string/CASColumn pairing rules as ``y_true``.
    pos_label : string, int or float
        Label of the positive class.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_score`` are strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        Step size of the threshold cutoffs. Default = 0.001
    figsize : tuple, optional
        Size of the generated figure. Default = (8, 8)
    fontsize_spec : dict, optional
        Overrides for the fontsizes of 'xlabel', 'ylabel', 'xtick', 'ytick'
        and 'title' (e.g. {'xlabel': 14}). Unspecified keys keep the
        defaults {'xlabel': 16, 'ylabel': 16, 'xtick': 14, 'ytick': 14,
        'title': 20}. Default = None
    linewidth : float, optional
        Line width of the ROC curve. Default = 1
    id_vars : string or list of strings, optional
        Column names present in both CASTables that uniquely identify
        observations, used to match ``y_true`` and ``y_score`` when they
        come from different tables. Default = None

    Returns
    -------
    :class:`matplotlib.axes.Axes`
        False positive rate on the x-axis, true positive rate on the y-axis.

    '''
    font_sizes = {'xlabel': 16, 'ylabel': 16, 'xtick': 14,
                  'ytick': 14, 'title': 20}
    if fontsize_spec is not None:
        font_sizes.update(fontsize_spec)

    if not isinstance(pos_label, str):
        pos_label = str(pos_label)

    checked = _check_inputs(y_true, y_score, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_score = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    # The percentile.assess action computes TPR/FPR at each threshold cutoff.
    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    assess_res = conn.retrieve('percentile.assess', _messagelevel='error',
                               table=castable,
                               inputs=y_score, response=y_true,
                               event=pos_label, cutstep=cutstep)
    if tmp_table_created:  # drop the temporary table created by _check_inputs
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    roc_info = assess_res['ROCInfo']
    # Append the (0, 0) endpoint so the curve is closed at threshold 1.
    fpr = list(roc_info.FPR) + [0]
    tpr = list(roc_info.Sensitivity) + [0]

    fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.plot(fpr, tpr, linestyle='-', linewidth=linewidth)
    ax.set_ylim([-0.01, 1.01])
    ax.set_xlim([-0.01, 1.01])
    # Dashed diagonal: the chance (random classifier) baseline.
    ax.plot([0, 1], [0, 1], linestyle='--', linewidth=linewidth)
    ax.set_xlabel('False Positive Rate', fontsize=font_sizes['xlabel'])
    ax.set_ylabel('True Positive Rate', fontsize=font_sizes['ylabel'])
    ax.get_xaxis().set_tick_params(direction='out', labelsize=font_sizes['xtick'])
    ax.get_yaxis().set_tick_params(direction='out', labelsize=font_sizes['ytick'])
    ax.set_title('ROC curve', fontsize=font_sizes['title'])
    return ax
def plot_precision_recall(y_true, y_score, pos_label, castable=None, cutstep=0.001,
                          figsize=(8, 8), fontsize_spec=None, linewidth=1, id_vars=None):
    '''
    Plot the precision-recall (PR) curve of a binary classifier.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground truth labels. When a string, ``y_score`` must also be a string
        and both columns must belong to the CASTable passed via ``castable``.
        When a :class:`CASColumn`, ``y_score`` must also be a
        :class:`CASColumn` (possibly from a different CASTable) and
        ``castable`` is ignored.
    y_score : string or :class:`CASColumn`
        Estimated probability of the positive class, with the same
        string/CASColumn pairing rules as ``y_true``.
    pos_label : string, int or float
        Label of the positive class.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_score`` are strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        Step size of the threshold cutoffs. Default = 0.001
    figsize : tuple, optional
        Size of the generated figure. Default = (8, 8)
    fontsize_spec : dict, optional
        Overrides for the fontsizes of 'xlabel', 'ylabel', 'xtick', 'ytick'
        and 'title' (e.g. {'xlabel': 14}). Unspecified keys keep the
        defaults {'xlabel': 16, 'ylabel': 16, 'xtick': 14, 'ytick': 14,
        'title': 20}. Default = None
    linewidth : float, optional
        Line width of the PR curve. Default = 1
    id_vars : string or list of strings, optional
        Column names present in both CASTables that uniquely identify
        observations, used to match ``y_true`` and ``y_score`` when they
        come from different tables. Default = None

    Returns
    -------
    :class:`matplotlib.axes.Axes`
        Recall (sensitivity) on the x-axis, precision on the y-axis.

    '''
    font_sizes = {'xlabel': 16, 'ylabel': 16, 'xtick': 14,
                  'ytick': 14, 'title': 20}
    if fontsize_spec is not None:
        font_sizes.update(fontsize_spec)

    if not isinstance(pos_label, str):
        pos_label = str(pos_label)

    checked = _check_inputs(y_true, y_score, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_score = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    # The percentile.assess action computes sensitivity and FDR at each
    # threshold cutoff.
    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    assess_res = conn.retrieve('percentile.assess', _messagelevel='error',
                               table=castable,
                               inputs=y_score, response=y_true,
                               event=pos_label, cutstep=cutstep)
    if tmp_table_created:  # drop the temporary table created by _check_inputs
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    roc_info = assess_res['ROCInfo']
    # FDR is undefined where no positives are predicted (TP + FP == 0);
    # define it as 0 there so precision = 1 - FDR is well defined.
    roc_info.loc[roc_info.TP + roc_info.FP == 0, 'FDR'] = 0
    fdr = list(roc_info.FDR) + [0]
    precision = [1 - value for value in fdr]
    recall = list(roc_info.Sensitivity) + [0]

    fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.plot(recall, precision, linestyle='-', linewidth=linewidth)
    ax.set_ylim([-0.01, 1.01])
    ax.set_xlim([-0.01, 1.01])
    ax.set_xlabel('Recall', fontsize=font_sizes['xlabel'])
    ax.set_ylabel('Precision', fontsize=font_sizes['ylabel'])
    ax.get_xaxis().set_tick_params(direction='out', labelsize=font_sizes['xtick'])
    ax.get_yaxis().set_tick_params(direction='out', labelsize=font_sizes['ytick'])
    ax.set_title('Precision-Recall curve', fontsize=font_sizes['title'])
    return ax
def roc_auc_score(y_true, y_score, pos_label, castable=None, cutstep=0.001, id_vars=None):
    '''
    Compute the area under the receiver operating characteristic (ROC)
    curve of a binary classifier.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground truth labels. When a string, ``y_score`` must also be a string
        and both columns must belong to the CASTable passed via ``castable``.
        When a :class:`CASColumn`, ``y_score`` must also be a
        :class:`CASColumn` (possibly from a different CASTable) and
        ``castable`` is ignored.
    y_score : string or :class:`CASColumn`
        Estimated probability of the positive class, with the same
        string/CASColumn pairing rules as ``y_true``.
    pos_label : string, int or float
        Label of the positive class.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_score`` are strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        Step size of the threshold cutoffs. Default = 0.001
    id_vars : string or list of strings, optional
        Column names present in both CASTables that uniquely identify
        observations, used to match ``y_true`` and ``y_score`` when they
        come from different tables. Default = None

    Returns
    -------
    score : float

    '''
    if not isinstance(pos_label, str):
        pos_label = str(pos_label)

    checked = _check_inputs(y_true, y_score, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_score = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    assess_res = conn.retrieve('percentile.assess', _messagelevel='error',
                               table=castable,
                               inputs=y_score, response=y_true,
                               event=pos_label, cutstep=cutstep)
    if tmp_table_created:  # drop the temporary table created by _check_inputs
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    # The ROCInfo table reports the C statistic (AUC); read it from the
    # first row.
    return assess_res['ROCInfo'].C.loc[0]
def average_precision_score(y_true, y_score, pos_label, castable=None, cutstep=0.001,
                            interpolate=False, id_vars=None):
    '''
    Compute the average precision score of a binary classifier.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground truth labels. When a string, ``y_score`` must also be a string
        and both columns must belong to the CASTable passed via ``castable``.
        When a :class:`CASColumn`, ``y_score`` must also be a
        :class:`CASColumn` (possibly from a different CASTable) and
        ``castable`` is ignored.
    y_score : string or :class:`CASColumn`
        Estimated probability of the positive class, with the same
        string/CASColumn pairing rules as ``y_true``.
    pos_label : string, int or float
        Label of the positive class.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_score`` are strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        Step size of the threshold cutoffs. Default = 0.001
    interpolate : boolean, optional
        If ``True``, return the area under the precision-recall curve with
        linear interpolation (trapezoidal rule). Otherwise use the
        definition from
        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
    id_vars : string or list of strings, optional
        Column names present in both CASTables that uniquely identify
        observations, used to match ``y_true`` and ``y_score`` when they
        come from different tables. Default = None

    Returns
    -------
    score : float

    '''
    if not isinstance(pos_label, str):
        pos_label = str(pos_label)

    checked = _check_inputs(y_true, y_score, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_score = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    assess_res = conn.retrieve('percentile.assess', _messagelevel='error',
                               table=castable,
                               inputs=y_score, response=y_true,
                               event=pos_label, cutstep=cutstep)
    if tmp_table_created:  # drop the temporary table created by _check_inputs
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    roc_info = assess_res['ROCInfo']
    # FDR is undefined where no positives are predicted (TP + FP == 0);
    # define it as 0 there so precision = 1 - FDR is well defined.
    roc_info.loc[roc_info.TP + roc_info.FP == 0, 'FDR'] = 0
    fdr = list(roc_info.FDR) + [0]
    precision = [1 - value for value in fdr]
    recall = list(roc_info.Sensitivity) + [0]

    ap = 0
    if interpolate:
        # Trapezoidal rule over the PR curve (linear interpolation).
        for i in range(len(recall) - 1):
            ap += np.mean(precision[i:i + 2]) * (recall[i] - recall[i + 1])
    else:
        # Same formulation as scikit-learn, without interpolation:
        # precision weighted by the drop in recall at each cutoff.
        for i in range(len(recall) - 1):
            ap += precision[i] * (recall[i] - recall[i + 1])
    return ap
def f1_score(y_true, y_pred, pos_label, castable=None, id_vars=None):
    r'''
    Compute the f1 score of the binary classification task. f1 score is defined as
    :math:`\frac{2PR}{P+R}`, where :math:`P` is the precision and :math:`R` is
    the recall.
    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_pred has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_pred has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    y_pred : string or :class:`CASColumn`
        The column of the predicted class labels. If it is a string, then
        y_true has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_true has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    pos_label : string, int or float
        The positive class label.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if the y_pred and y_true are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_pred if they are
        from different CASTables. The column names need to appear in both CASTables,
        and they serve to match y_true and y_pred appropriately, since observation
        orders can be shuffled in distributed computing environment.
        Default = None.
    Returns
    -------
    score : float
    '''
    # Fix: the docstring above is now a raw string; previously the non-raw
    # literal turned the "\f" of ":math:`\frac...`" into a form-feed
    # character, corrupting the rendered documentation.
    conf_mat = confusion_matrix(y_true, y_pred, castable=castable, id_vars=id_vars)
    # Rows of the confusion matrix are true classes, columns are predictions,
    # so recall reads along the positive row and precision down the column.
    # NOTE(review): pos_label is used as a *positional* index here; for
    # string/float labels confirm .iloc accepts it (label-based .loc may be
    # what is intended) -- verify against confusion_matrix's index.
    recall = conf_mat.iloc[pos_label, pos_label]/conf_mat.iloc[pos_label, :].sum()
    precision = conf_mat.iloc[pos_label, pos_label]/conf_mat.iloc[:, pos_label].sum()
    # Harmonic mean of precision and recall.
    f1 = 2*precision*recall/(precision + recall)
    return f1
def explained_variance_score(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the explained variance score of a regression task: the fraction
    of the target variable's variance that the model accounts for.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground-truth target values. A string names a column of ``castable``;
        a :class:`CASColumn` may come from any CASTable (``castable`` is
        then ignored).
    y_pred : string or :class:`CASColumn`
        Predicted target values, with the same conventions as ``y_true``.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_pred`` are strings.
        Default = None
    id_vars : string or list of strings, optional
        Columns present in both tables that uniquely identify observations,
        used to match y_true and y_pred when they come from different
        CASTables. Default = None.

    Returns
    -------
    score : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                           return_target_dtype=False, id_vars=id_vars)
    # Pick a residual column name that does not collide with existing columns.
    residual_col = 'err'
    while residual_col in castable.columns:
        residual_col = random_name(name='err_')
    # Attach the residual (y_true - y_pred) as a computed column on a new
    # CASTable handle over the same underlying table.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": residual_col}],
        computedvarsprogram='{2}={0}-{1}'.format(y_true, y_pred, residual_col))
    # Explained variance = 1 - Var(residual) / Var(target).
    score = 1 - castable[residual_col].var() / castable[y_true].var()
    if tmp_table_created:
        # _check_inputs merged the two inputs into a temporary table; drop it.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return score
def mean_absolute_error(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the mean absolute error of a regression task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground-truth target values. A string names a column of ``castable``;
        a :class:`CASColumn` may come from any CASTable (``castable`` is
        then ignored).
    y_pred : string or :class:`CASColumn`
        Predicted target values, with the same conventions as ``y_true``.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_pred`` are strings.
        Default = None
    id_vars : string or list of strings, optional
        Columns present in both tables that uniquely identify observations,
        used to match y_true and y_pred when they come from different
        CASTables. Default = None.

    Returns
    -------
    loss : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                           return_target_dtype=False, id_vars=id_vars)
    # Pick an error column name that does not collide with existing columns.
    abs_err_col = 'abserr'
    while abs_err_col in castable.columns:
        abs_err_col = random_name(name='abserr_')
    # Attach |y_true - y_pred| as a computed column on a new CASTable handle.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": abs_err_col}],
        computedvarsprogram='{2}=abs({0}-{1})'.format(y_true, y_pred, abs_err_col))
    loss = castable[abs_err_col].mean()
    if tmp_table_created:
        # _check_inputs merged the two inputs into a temporary table; drop it.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return loss
def mean_squared_error(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the mean squared error of a regression task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground-truth target values. A string names a column of ``castable``;
        a :class:`CASColumn` may come from any CASTable (``castable`` is
        then ignored).
    y_pred : string or :class:`CASColumn`
        Predicted target values, with the same conventions as ``y_true``.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_pred`` are strings.
        Default = None
    id_vars : string or list of strings, optional
        Columns present in both tables that uniquely identify observations,
        used to match y_true and y_pred when they come from different
        CASTables. Default = None.

    Returns
    -------
    loss : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                           return_target_dtype=False, id_vars=id_vars)
    # Pick an error column name that does not collide with existing columns.
    sq_err_col = 'err2'
    while sq_err_col in castable.columns:
        sq_err_col = random_name(name='err2_')
    # Attach (y_true - y_pred)**2 as a computed column on a new CASTable handle.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": sq_err_col}],
        computedvarsprogram='{2}=({0}-{1})**2'.format(y_true, y_pred, sq_err_col))
    loss = castable[sq_err_col].mean()
    if tmp_table_created:
        # _check_inputs merged the two inputs into a temporary table; drop it.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return loss
def mean_squared_log_error(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the mean squared logarithmic error of a regression task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground-truth target values. A string names a column of ``castable``;
        a :class:`CASColumn` may come from any CASTable (``castable`` is
        then ignored).
    y_pred : string or :class:`CASColumn`
        Predicted target values, with the same conventions as ``y_true``.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_pred`` are strings.
        Default = None
    id_vars : string or list of strings, optional
        Columns present in both tables that uniquely identify observations,
        used to match y_true and y_pred when they come from different
        CASTables. Default = None.

    Returns
    -------
    loss : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                           return_target_dtype=False, id_vars=id_vars)
    # Pick an error column name that does not collide with existing columns.
    log_err_col = 'logerr2'
    while log_err_col in castable.columns:
        log_err_col = random_name(name='logerr2_')
    # Attach (log(1+y_true) - log(1+y_pred))**2 as a computed column on a
    # new CASTable handle.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": log_err_col}],
        computedvarsprogram='{2}=(log(1+{0})-log(1+{1}))**2'.format(y_true, y_pred, log_err_col))
    loss = castable[log_err_col].mean()
    if tmp_table_created:
        # _check_inputs merged the two inputs into a temporary table; drop it.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return loss
def r2_score(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the R^2 (coefficient of determination) regression score.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        Ground-truth target values. A string names a column of ``castable``;
        a :class:`CASColumn` may come from any CASTable (``castable`` is
        then ignored).
    y_pred : string or :class:`CASColumn`
        Predicted target values, with the same conventions as ``y_true``.
    castable : :class:`CASTable`, optional
        Source table when ``y_true`` and ``y_pred`` are strings.
        Default = None
    id_vars : string or list of strings, optional
        Columns present in both tables that uniquely identify observations,
        used to match y_true and y_pred when they come from different
        CASTables. Default = None.

    Returns
    -------
    loss : float
    '''
    mse = mean_squared_error(y_true, y_pred, castable=castable, id_vars=id_vars)
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                           return_target_dtype=False, id_vars=id_vars)
    # Number of complete (non-missing) observation pairs.
    nobs = castable[[y_true, y_pred]].dropna().shape[0]
    # R^2 = 1 - SSE/TSS, recovering SSE from the mean squared error and TSS
    # from the sample variance of the target.
    sse = nobs * mse
    tss = castable[y_true].var() * (nobs - 1)
    score = 1 - sse / tss
    if tmp_table_created:
        # _check_inputs merged the two inputs into a temporary table; drop it.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return score
def _check_inputs(y_true, y_pred, castable=None, return_target_dtype=False, id_vars=None):
    '''
    Check the input argument y_true, y_pred, and return their names if they are CASColumn.
    If y_true, and y_pred is in the form of CASColumn and from different CASTables,
    a temporary CASTable will be created which contains both columns.
    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_pred has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_pred has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    y_pred : string or :class:`CASColumn`
        The column of the predicted class labels. If it is a string, then
        y_true has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_true has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if the y_pred and y_true are strings.
        Default = None
    return_target_dtype : boolean, optional
        If True, return the data type of y_true in the CASTable.
        Default = False
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_pred if they are
        from different CASTables. The column names need to appear in both CASTables,
        and they serve to match y_true and y_pred appropriately, since observation
        orders can be shuffled in distributed computing environment.
        Default = None.
    Returns
    -------
    y_true : string
        The column name of the y_true column.
    y_pred : string
        The column name of the y_pred column.
    castable : :class:`CASTable`
        The original CASTable if y_true and y_pred are in the same castable. The
        temporary table that contain both columns if y_true and y_pred is from
        different CASTable.
    conn : :class:`CAS`
        The connection on the CASColumn or CASTable.
    tmp_table_created : boolean
        Whether a temporary CASTable is created to host y_true and y_pred.
    target_dtype : string
        The data type of y_true in the CASTable.
        Only provided if `return_target_dtype` is True.
    '''
    tmp_table_created = False
    if isinstance(y_pred, str) and isinstance(y_true, str):
        # Both inputs are plain column names: they must live in the supplied
        # castable, so that table is used as-is.
        if not isinstance(castable, CASTable):
            raise ValueError('castable need to be a CASTable if y_true and y_pred are strings')
        conn = castable.get_connection()
        if return_target_dtype:
            # Look up the declared CAS type of the target column.
            colinfo = castable.columninfo().ColumnInfo
            target_dtype = colinfo.Type[colinfo.Column==y_true].values[0]
    elif isinstance(y_pred, CASColumn) and isinstance(y_true, CASColumn):
        conn = y_true.get_connection()
        # Names of the tables each CASColumn comes from; used below to decide
        # whether a merge into a temporary table is needed.
        y_true_tblname = y_true.to_outtable_params()['name']
        y_pred_tblname = y_pred.to_outtable_params()['name']
        if return_target_dtype:
            colinfo = y_true.columninfo().ColumnInfo
            target_dtype = colinfo.Type[colinfo.Column==y_true.name].values[0]
        # From here on, work with plain column name strings.
        y_true = y_true.name
        y_pred = y_pred.name
        if y_true_tblname != y_pred_tblname:
            # Columns live in different tables: merge them into a temporary
            # table via a SAS data step.
            tmp_table_name = random_name('metric_tmp',6)
            if id_vars is None:
                warnings.warn('{} and {} are from different CASTables, '.format(y_true, y_pred) +
                              'and their appropriate matching may not be guaranteed '+
                              'unless id_vars argument is provided.')
                sascode = '''
                data {};
                merge {}(keep={}) {}(keep={});
                run;
                '''.format(tmp_table_name, y_true_tblname, y_true, y_pred_tblname, y_pred)
                # single='Yes' presumably forces the data step to run on a
                # single machine so row order is preserved -- TODO confirm.
                conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode, single='Yes')
            else:
                if not isinstance(id_vars, list):
                    id_vars = [id_vars]
                # Keep the id columns alongside each target column and merge
                # BY those ids so rows match regardless of physical order.
                y_true_keep = ' '.join([y_true]+id_vars)
                y_pred_keep = ' '.join([y_pred]+id_vars)
                by_var = ' '.join(id_vars)
                sascode = '''
                data {};
                merge {}(keep={}) {}(keep={});
                by {};
                run;
                '''.format(tmp_table_name, y_true_tblname, y_true_keep, y_pred_tblname, y_pred_keep, by_var)
                conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
            castable = conn.CASTable(tmp_table_name)
            # Caller is responsible for dropping the temporary table.
            tmp_table_created = True
        else:
            # Same underlying table: just wrap it in a CASTable handle.
            castable = conn.CASTable(y_true_tblname)
    else:
        raise ValueError('Input for ground truth and predicted value need to be the same type of either '+
                         'strings representing column names or CASColumns')
    if return_target_dtype:
        return (y_true, y_pred, castable, conn, tmp_table_created, target_dtype)
    else:
        return (y_true, y_pred, castable, conn, tmp_table_created)
| [
"pandas.DataFrame",
"collections.OrderedDict.fromkeys",
"numpy.mean",
"numpy.diag",
"matplotlib.pyplot.subplots"
] | [((4395, 4451), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ret', 'columns': 'cls_list', 'index': 'cls_list'}), '(data=ret, columns=cls_list, index=cls_list)\n', (4407, 4451), True, 'import pandas as pd\n'), ((12386, 12421), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (12398, 12421), True, 'import matplotlib.pyplot as plt\n'), ((16961, 16996), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (16973, 16996), True, 'import matplotlib.pyplot as plt\n'), ((8058, 8075), 'numpy.diag', 'np.diag', (['conf_mat'], {}), '(conf_mat)\n', (8065, 8075), True, 'import numpy as np\n'), ((3767, 3810), 'collections.OrderedDict.fromkeys', 'collections.OrderedDict.fromkeys', (['row_class'], {}), '(row_class)\n', (3799, 3810), False, 'import collections\n'), ((3822, 3865), 'collections.OrderedDict.fromkeys', 'collections.OrderedDict.fromkeys', (['col_class'], {}), '(col_class)\n', (3854, 3865), False, 'import collections\n'), ((24009, 24036), 'numpy.mean', 'np.mean', (['precision[i:i + 2]'], {}), '(precision[i:i + 2])\n', (24016, 24036), True, 'import numpy as np\n')] |
from typing import Callable, Type, Union
import numpy as np
import numpy.typing as npt
def init(shape: Union[int, tuple[int, int]],
         setting: str = "simple",
         dtype: Type[float] = int,
         k: int = 2) -> npt.NDArray[float]:
    """Build an initial state array for a cellular automaton.

    "simple" yields an all-zero array with a single cell of value ``k - 1``
    at the centre; "random" yields uniform random values in ``[0, k)``.
    """
    if setting == "random":
        return np.random.randint(k, size=shape, dtype=dtype)
    if setting == "simple":
        grid = np.zeros(shape, dtype=dtype)
        if isinstance(shape, int):
            # 1-D: seed the middle cell.
            grid[len(grid) // 2] = dtype(k - 1)
        else:
            # 2-D: seed the middle cell of the middle row.
            grid[grid.shape[0] // 2][grid.shape[1] // 2] = dtype(k - 1)
        return grid
    raise ValueError(f"'{setting}' is not a valid init preset name; supported names are: 'simple', 'random'")
class CellularAutomaton1D:
    """One-dimensional cellular automaton with wrap-around (toroidal) edges.

    ``apply`` receives a dict mapping each configured (dx, dy) offset to the
    value of the corresponding cell and returns the cell's next value.
    """

    def __init__(
            self,
            init: Union[list[float], npt.NDArray[float]],
            neighbors: list[tuple[int, int]],
            apply: Callable[[dict[tuple[int, int]]], float]
    ):
        # Full history of states; generations[-1] is the current one.
        self.generations = [init]
        self.neighbors = neighbors
        self.apply = apply
        self.width = len(init)

    def evolve(self, generations: int = 1) -> None:
        """Advance the automaton by the given number of generations."""
        for _ in range(generations):
            self.generations.append(self._generation())

    def _generation(self) -> list[float]:
        """Compute the next state row from the stored history."""
        return [self.apply(self._neighborhood(cell)) for cell in range(self.width)]

    def _neighborhood(self, x: int) -> dict[tuple[int, int], float]:
        """Collect the values of the configured neighbour offsets around x."""
        cells = {}
        for dx, dy in self.neighbors:
            # NOTE(review): dy selects a generation relative to the newest
            # one, wrapped over the current history length -- confirm this
            # wrap is intended for dy != 0.
            row = self.generations[(-1 + dy) % len(self.generations)]
            cells[(dx, dy)] = row[(x + dx) % self.width]
        return cells
class CellularAutomaton2D:
    """Two-dimensional cellular automaton on a toroidal (wrap-around) grid.

    ``init[y][x]`` addresses row ``y``, column ``x``.  ``apply`` receives a
    dict mapping each configured (dx, dy) offset to that neighbour's value in
    the previous generation, plus the cell's current value, and returns the
    cell's next value.
    """

    def __init__(
            self,
            init: Union[list[list[float]], npt.NDArray[npt.NDArray[float]]],
            neighbors: list[tuple[int, int]],
            apply: Callable[[dict[tuple[int, int], float], float], float]
    ):
        # Full history of grids; generations[-1] is the current one.
        self.generations = [init]
        self.neighbors = neighbors
        self.apply = apply
        self.width = len(init[0])
        self.height = len(init)

    def evolve(self, generations: int = 1) -> None:
        """Advance the automaton by the given number of generations."""
        for _ in range(generations):
            self.generations.append(self._generation())

    def _generation(self) -> list[list[float]]:
        """Compute the next grid from the latest generation."""
        next_generation = []
        for y in range(self.height):
            next_row = []
            for x in range(self.width):
                next_row.append(self.apply(self._get_neighbors(x, y), self.generations[-1][y][x]))
            next_generation.append(next_row)
        return next_generation

    def _get_neighbors(self, x: int, y: int) -> dict[tuple[int, int], float]:
        """Collect the configured neighbour values around (x, y)."""
        neighbors = {}
        for neighbor in self.neighbors:
            dx, dy = neighbor
            # Bug fix: the row index must wrap around the grid *height*; the
            # original wrapped it by width, which raised IndexError (or
            # silently mis-wrapped) on non-square grids.
            neighbors[neighbor] = self.generations[-1][(y + dy) % self.height][(x + dx) % self.width]
        return neighbors
| [
"numpy.random.randint",
"numpy.zeros"
] | [((288, 316), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (296, 316), True, 'import numpy as np\n'), ((535, 580), 'numpy.random.randint', 'np.random.randint', (['k'], {'size': 'shape', 'dtype': 'dtype'}), '(k, size=shape, dtype=dtype)\n', (552, 580), True, 'import numpy as np\n')] |
import json
import numpy as np
from tensorflow import keras
from colorama import Fore, Style, Back
import pickle
import colorama
# Enable ANSI colour handling so the Fore/Style codes also work on Windows.
colorama.init()

# Intent definitions; chat() looks up each predicted tag's 'responses' here.
with open("intents.json") as file:
    data = json.load(file)
def chat():
    """Run an interactive console chat session with the trained bot.

    Loads the saved Keras model, tokenizer and label encoder from disk, then
    loops reading user input until the user types "quit".
    """
    model = keras.models.load_model('chat_model_college')
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    with open('label_encoder.pickle', 'rb') as enc:
        lbl_encoder = pickle.load(enc)
    # Sequence length used during training -- TODO confirm against trainer.
    max_len = 20
    while True:
        print(Fore.LIGHTBLUE_EX + "User: " + Style.RESET_ALL, end="")
        user_text = input()
        if user_text.lower() == "quit":
            break
        padded = keras.preprocessing.sequence.pad_sequences(
            tokenizer.texts_to_sequences([user_text]),
            truncating='post', maxlen=max_len)
        scores = model.predict(padded)
        # Map the highest-scoring class index back to its intent tag.
        tag = lbl_encoder.inverse_transform([np.argmax(scores)])
        for intent in data['intents']:
            if intent['tag'] == tag:
                print(Fore.GREEN + "ChatBot:" + Style.RESET_ALL,
                      np.random.choice(intent['responses']))


chat()
| [
"colorama.init",
"json.load",
"tensorflow.keras.models.load_model",
"numpy.argmax",
"pickle.load",
"numpy.random.choice"
] | [((137, 152), 'colorama.init', 'colorama.init', ([], {}), '()\n', (150, 152), False, 'import colorama\n'), ((205, 220), 'json.load', 'json.load', (['file'], {}), '(file)\n', (214, 220), False, 'import json\n'), ((277, 322), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""chat_model_college"""'], {}), "('chat_model_college')\n", (300, 322), False, 'from tensorflow import keras\n'), ((427, 446), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (438, 446), False, 'import pickle\n'), ((558, 574), 'pickle.load', 'pickle.load', (['enc'], {}), '(enc)\n', (569, 574), False, 'import pickle\n'), ((1051, 1068), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (1060, 1068), True, 'import numpy as np\n'), ((1207, 1239), 'numpy.random.choice', 'np.random.choice', (["i['responses']"], {}), "(i['responses'])\n", (1223, 1239), True, 'import numpy as np\n')] |
"""Wrapper for creating the ant environment in gym_mujoco."""
import math
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """2-D point-mass MuJoCo environment.

    State layout (see ``get_ori``/``set_xy``): ``qpos[0:2]`` is the (x, y)
    position and ``qpos[2]`` (``ORI_IND``) is the heading angle.  An action
    is ``[forward, turn]``: the point first turns by ``action[1]`` and then
    moves ``0.2 * action[0]`` along its new heading.
    """

    # MuJoCo model file for this environment.
    FILE = "point.xml"
    # Index of the orientation (heading) entry in qpos.
    ORI_IND = 2

    def __init__(self, file_path=None, expose_all_qpos=True):
        # When True, _get_obs includes the absolute (x, y, ori) coordinates;
        # otherwise only the orientation plus velocities are exposed.
        self._expose_all_qpos = expose_all_qpos
        mujoco_env.MujocoEnv.__init__(self, file_path, 1)
        utils.EzPickle.__init__(self)

    @property
    def physics(self):
        # Compatibility alias for code that expects a `physics` handle.
        return self.model

    def _step(self, a):
        # Legacy alias for the old gym API method name.
        return self.step(a)

    def viewer_setup(self):
        self.viewer.cam.distance = self.model.stat.extent
        # Center point is (4,4,4)
        self.viewer.cam.lookat[0] = 4
        self.viewer.cam.lookat[1] = 4
        self.viewer.cam.lookat[2] = 4
        self.viewer.cam.trackbodyid = 1
        # Top-down view.
        self.viewer.cam.elevation = -90

    def step(self, action):
        # NOTE(review): this scales action[0] in place, mutating the caller's
        # array -- confirm callers do not reuse the action afterwards.
        action[0] = 0.2 * action[0]
        qpos = np.copy(self.data.qpos)
        # Turn first, then move forward along the new heading.
        qpos[2] += action[1]
        ori = qpos[2]
        # compute increment in each direction
        dx = math.cos(ori) * action[0]
        dy = math.sin(ori) * action[0]
        # ensure that the robot is within reasonable range
        qpos[0] = np.clip(qpos[0] + dx, -100, 100)
        qpos[1] = np.clip(qpos[1] + dy, -100, 100)
        qvel = self.data.qvel
        self.set_state(qpos, qvel)
        for _ in range(0, self.frame_skip):
            self.sim.step()
        next_obs = self._get_obs()
        # Reward is always 0 here; task rewards are defined by wrappers.
        reward = 0
        done = False
        info = {}
        '''
        site_id = self.sim.model.site_name2id('target0')
        print('site_pos', self.sim.model.site_pos[site_id])
        '''
        return next_obs, reward, done, info

    def _get_obs(self):
        if self._expose_all_qpos:
            # Position + orientation plus their velocities.
            return np.concatenate([
                self.data.qpos.flat[:3],  # Only point-relevant coords.
                self.data.qvel.flat[:3]])
        # Orientation only (no absolute position) plus velocities.
        return np.concatenate([
            self.data.qpos.flat[2:3],
            self.data.qvel.flat[:3]])

    def reset_model(self):
        """Randomize the point's pose slightly; everything else is reset."""
        qpos = self.init_qpos + self.np_random.uniform(
            size=self.model.nq, low=-.1, high=.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        # Set everything other than point to original position and 0 velocity.
        qpos[3:] = self.init_qpos[3:]
        qvel[3:] = 0.
        self.set_state(qpos, qvel)
        return self._get_obs()

    def get_ori(self):
        # Heading angle stored at qpos[ORI_IND].
        return self.data.qpos[self.__class__.ORI_IND]

    def set_xy(self, xy):
        """Teleport the point to the given (x, y), keeping velocities."""
        qpos = np.copy(self.data.qpos)
        qpos[0] = xy[0]
        qpos[1] = xy[1]
        qvel = self.data.qvel
        self.set_state(qpos, qvel)

    def get_xy(self):
        """Return the point's current (x, y) position."""
        qpos = np.copy(self.data.qpos)
        return qpos[:2]

    def render_callback(self, goal):
        goal = goal.copy()
        # Visualize target.
        sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
        site_id = self.sim.model.site_name2id('target0')
        # Extend the 2-D goal with a third coordinate taken from the first
        # site's offset -- presumably its z offset; TODO confirm.
        goal = np.concatenate([goal, sites_offset[0][-1:]])
        # self.sim.model.site_pos[site_id] = goal - sites_offset[0]
        self.sim.model.site_pos[site_id] = goal
        self.sim.forward()
| [
"gym.utils.EzPickle.__init__",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.copy",
"numpy.clip",
"math.sin",
"math.cos",
"numpy.concatenate"
] | [((382, 431), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'file_path', '(1)'], {}), '(self, file_path, 1)\n', (411, 431), False, 'from gym.envs.mujoco import mujoco_env\n'), ((440, 469), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (463, 469), False, 'from gym import utils\n'), ((994, 1017), 'numpy.copy', 'np.copy', (['self.data.qpos'], {}), '(self.data.qpos)\n', (1001, 1017), True, 'import numpy as np\n'), ((1270, 1302), 'numpy.clip', 'np.clip', (['(qpos[0] + dx)', '(-100)', '(100)'], {}), '(qpos[0] + dx, -100, 100)\n', (1277, 1302), True, 'import numpy as np\n'), ((1321, 1353), 'numpy.clip', 'np.clip', (['(qpos[1] + dy)', '(-100)', '(100)'], {}), '(qpos[1] + dy, -100, 100)\n', (1328, 1353), True, 'import numpy as np\n'), ((1997, 2064), 'numpy.concatenate', 'np.concatenate', (['[self.data.qpos.flat[2:3], self.data.qvel.flat[:3]]'], {}), '([self.data.qpos.flat[2:3], self.data.qvel.flat[:3]])\n', (2011, 2064), True, 'import numpy as np\n'), ((2643, 2666), 'numpy.copy', 'np.copy', (['self.data.qpos'], {}), '(self.data.qpos)\n', (2650, 2666), True, 'import numpy as np\n'), ((2831, 2854), 'numpy.copy', 'np.copy', (['self.data.qpos'], {}), '(self.data.qpos)\n', (2838, 2854), True, 'import numpy as np\n'), ((3130, 3174), 'numpy.concatenate', 'np.concatenate', (['[goal, sites_offset[0][-1:]]'], {}), '([goal, sites_offset[0][-1:]])\n', (3144, 3174), True, 'import numpy as np\n'), ((1128, 1141), 'math.cos', 'math.cos', (['ori'], {}), '(ori)\n', (1136, 1141), False, 'import math\n'), ((1167, 1180), 'math.sin', 'math.sin', (['ori'], {}), '(ori)\n', (1175, 1180), False, 'import math\n'), ((1851, 1917), 'numpy.concatenate', 'np.concatenate', (['[self.data.qpos.flat[:3], self.data.qvel.flat[:3]]'], {}), '([self.data.qpos.flat[:3], self.data.qvel.flat[:3]])\n', (1865, 1917), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.